Diffstat (limited to 'clang')
-rw-r--r--  clang/docs/LanguageExtensions.rst | 24
-rw-r--r--  clang/docs/ReleaseNotes.rst | 3
-rw-r--r--  clang/include/clang/AST/OpenACCClause.h | 2
-rw-r--r--  clang/include/clang/AST/Type.h | 14
-rw-r--r--  clang/include/clang/Basic/BuiltinsAMDGPU.def | 2
-rw-r--r--  clang/include/clang/Basic/Cuda.h | 1
-rw-r--r--  clang/include/clang/Basic/DiagnosticSemaKinds.td | 6
-rw-r--r--  clang/include/clang/Basic/OpenACCClauses.def | 22
-rw-r--r--  clang/include/clang/Basic/TokenKinds.def | 2
-rw-r--r--  clang/include/clang/Basic/riscv_vector.td | 3
-rw-r--r--  clang/include/clang/Lex/DependencyDirectivesScanner.h | 3
-rw-r--r--  clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h | 3
-rw-r--r--  clang/lib/AST/CMakeLists.txt | 1
-rw-r--r--  clang/lib/AST/Interp/ByteCodeExprGen.cpp | 102
-rw-r--r--  clang/lib/AST/Interp/Context.cpp | 15
-rw-r--r--  clang/lib/AST/Interp/Context.h | 2
-rw-r--r--  clang/lib/AST/Interp/Descriptor.cpp | 1
-rw-r--r--  clang/lib/AST/Interp/Disasm.cpp | 3
-rw-r--r--  clang/lib/AST/Interp/Function.cpp | 3
-rw-r--r--  clang/lib/AST/Interp/Interp.cpp | 39
-rw-r--r--  clang/lib/AST/Interp/Interp.h | 107
-rw-r--r--  clang/lib/AST/Interp/InterpFrame.cpp | 1
-rw-r--r--  clang/lib/AST/Interp/InterpStack.cpp | 1
-rw-r--r--  clang/lib/AST/Interp/InterpStack.h | 3
-rw-r--r--  clang/lib/AST/Interp/MemberPointer.cpp | 76
-rw-r--r--  clang/lib/AST/Interp/MemberPointer.h | 112
-rw-r--r--  clang/lib/AST/Interp/Opcodes.td | 18
-rw-r--r--  clang/lib/AST/Interp/Pointer.cpp | 1
-rw-r--r--  clang/lib/AST/Interp/Pointer.h | 1
-rw-r--r--  clang/lib/AST/Interp/PrimType.cpp | 1
-rw-r--r--  clang/lib/AST/Interp/PrimType.h | 8
-rw-r--r--  clang/lib/AST/OpenACCClause.cpp | 2
-rw-r--r--  clang/lib/AST/ParentMap.cpp | 16
-rw-r--r--  clang/lib/AST/TextNodeDumper.cpp | 21
-rw-r--r--  clang/lib/AST/Type.cpp | 38
-rw-r--r--  clang/lib/Analysis/CFG.cpp | 50
-rw-r--r--  clang/lib/Basic/Cuda.cpp | 1
-rw-r--r--  clang/lib/Basic/Targets/LoongArch.h | 2
-rw-r--r--  clang/lib/Basic/Targets/NVPTX.cpp | 1
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp | 1
-rw-r--r--  clang/lib/CodeGen/Targets/AMDGPU.cpp | 6
-rw-r--r--  clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp | 5
-rw-r--r--  clang/lib/Format/UnwrappedLineParser.cpp | 6
-rw-r--r--  clang/lib/Frontend/FrontendActions.cpp | 4
-rw-r--r--  clang/lib/Interpreter/IncrementalParser.cpp | 3
-rw-r--r--  clang/lib/Interpreter/Interpreter.cpp | 165
-rw-r--r--  clang/lib/Lex/DependencyDirectivesScanner.cpp | 22
-rw-r--r--  clang/lib/Parse/ParseStmt.cpp | 5
-rw-r--r--  clang/lib/Sema/Scope.cpp | 4
-rw-r--r--  clang/lib/Sema/SemaAMDGPU.cpp | 6
-rw-r--r--  clang/lib/Sema/SemaDecl.cpp | 3
-rw-r--r--  clang/lib/Sema/SemaExpr.cpp | 35
-rw-r--r--  clang/lib/Sema/SemaExprCXX.cpp | 6
-rw-r--r--  clang/lib/Sema/SemaInit.cpp | 19
-rw-r--r--  clang/lib/Sema/SemaOpenACC.cpp | 1191
-rw-r--r--  clang/lib/Sema/TreeTransform.h | 12
-rw-r--r--  clang/lib/StaticAnalyzer/Core/ExprEngine.cpp | 56
-rw-r--r--  clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp | 4
-rw-r--r--  clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp | 5
-rw-r--r--  clang/test/AST/Interp/arrays.cpp | 14
-rw-r--r--  clang/test/AST/Interp/cxx23.cpp | 22
-rw-r--r--  clang/test/AST/Interp/eval-order.cpp | 4
-rw-r--r--  clang/test/AST/Interp/literals.cpp | 9
-rw-r--r--  clang/test/AST/Interp/memberpointers.cpp | 197
-rw-r--r--  clang/test/AST/ast-dump-default-init-json.cpp | 6
-rw-r--r--  clang/test/AST/ast-dump-default-init.cpp | 2
-rw-r--r--  clang/test/AST/ast-print-openacc-loop-construct.cpp | 9
-rw-r--r--  clang/test/Analysis/cxx-uninitialized-object.cpp | 12
-rw-r--r--  clang/test/Analysis/lifetime-extended-regions.cpp | 10
-rw-r--r--  clang/test/CXX/drs/cwg16xx.cpp | 2
-rw-r--r--  clang/test/CXX/drs/cwg18xx.cpp | 19
-rw-r--r--  clang/test/CXX/special/class.temporary/p6.cpp | 34
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcpopv.c | 264
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcpopv.c | 264
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcpopv.c | 528
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcpopv.c | 528
-rw-r--r--  clang/test/CodeGen/voidptr-vaarg.c | 478
-rw-r--r--  clang/test/CodeGenCUDA/cuda-builtin-vars.cu | 24
-rw-r--r--  clang/test/CodeGenCXX/inline-then-fold-variadics.cpp | 181
-rw-r--r--  clang/test/CodeGenCXX/pointers-to-data-members.cpp | 1
-rw-r--r--  clang/test/CodeGenCXX/template-param-objects-linkage.cpp | 1
-rw-r--r--  clang/test/CodeGenOpenCL/amdgpu-features.cl | 2
-rw-r--r--  clang/test/CodeGenOpenCL/builtins-amdgcn-gfx11.cl | 1
-rw-r--r--  clang/test/Driver/aarch64-oryon-1.c | 19
-rw-r--r--  clang/test/Driver/amdgpu-macros.cl | 1
-rw-r--r--  clang/test/Driver/amdgpu-mcpu.cl | 2
-rw-r--r--  clang/test/Interpreter/pretty-print.c | 8
-rw-r--r--  clang/test/Misc/target-invalid-cpu-note.c | 8
-rw-r--r--  clang/test/SemaCXX/attr-weak.cpp | 1
-rw-r--r--  clang/test/SemaCXX/builtin-is-bitwise-cloneable-fsanitize.cpp | 34
-rw-r--r--  clang/test/SemaCXX/builtin-is-bitwise-cloneable.cpp | 8
-rw-r--r--  clang/test/SemaCXX/constexpr-default-arg.cpp | 4
-rw-r--r--  clang/test/SemaCXX/cxx11-default-member-initializers.cpp | 74
-rw-r--r--  clang/test/SemaCXX/eval-crashes.cpp | 6
-rw-r--r--  clang/test/SemaCXX/nullptr_in_arithmetic_ops.cpp | 1
-rw-r--r--  clang/test/SemaObjCXX/arc-type-traits.mm | 9
-rw-r--r--  clang/test/SemaOpenACC/loop-construct-auto_seq_independent-clauses.c | 6
-rw-r--r--  clang/test/SemaOpenACC/loop-construct-private-clause.c | 132
-rw-r--r--  clang/test/SemaOpenACC/loop-construct-private-clause.cpp | 155
-rw-r--r--  clang/test/SemaOpenCL/builtins-amdgcn-gfx940-err.cl | 6
-rw-r--r--  clang/unittests/AST/Interp/toAPValue.cpp | 46
-rw-r--r--  clang/unittests/Format/FormatTest.cpp | 11
-rw-r--r--  clang/unittests/Lex/DependencyDirectivesScannerTest.cpp | 82
-rw-r--r--  clang/unittests/Lex/PPDependencyDirectivesTest.cpp | 3
-rwxr-xr-x  clang/www/cxx_dr_status.html | 2
105 files changed, 3634 insertions, 1865 deletions
diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst
index 46f99d0..a49e412 100644
--- a/clang/docs/LanguageExtensions.rst
+++ b/clang/docs/LanguageExtensions.rst
@@ -4016,6 +4016,30 @@ Note that the `size` argument must be a compile time constant.
Note that this intrinsic cannot yet be called in a ``constexpr`` context.
+``__is_bitwise_cloneable``
+--------------------------
+
+A type trait that checks whether a type can be safely copied by memcpy.
+
+**Syntax**:
+
+.. code-block:: c++
+
+ bool __is_bitwise_cloneable(Type)
+
+**Description**:
+
+Objects of bitwise cloneable types can be bitwise copied by memcpy/memmove. The
+Clang compiler guarantees that this behavior is well defined and won't be
+broken by compiler optimizations or sanitizers.
+
+For implicit-lifetime types, the lifetime of the new object is implicitly
+started after the copy. For other types (e.g., classes with virtual methods),
+the lifetime isn't started, and using the object results in undefined behavior
+according to the C++ Standard.
+
+This builtin can be used in constant expressions.
+
Atomic Min/Max builtins with memory ordering
--------------------------------------------
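
A quick usage sketch of the trait documented above (my illustration, not part of the patch):

.. code-block:: c++

  #include <cstring>

  struct Trivial { int i; };
  struct Dynamic { virtual void f(); int i; };  // polymorphic, not trivially copyable

  static_assert(__is_bitwise_cloneable(Trivial), "");
  static_assert(__is_bitwise_cloneable(Dynamic), "");  // still bitwise cloneable

  void clone(const Dynamic &src, Dynamic &dst) {
    // Well defined per the extension: dst receives src's object
    // representation. Dynamic is not an implicit-lifetime type, so the copy
    // does not start a new lifetime, and using the result is UB per the
    // C++ standard, exactly as the documentation above describes.
    std::memcpy(&dst, &src, sizeof(Dynamic));
  }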
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 69ac081..b9c9070 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -340,6 +340,9 @@ Non-comprehensive list of changes in this release
``-Winvalid-constexpr`` is not enabled for the function definition, which
should result in mild compile-time performance improvements.
+- Added ``__is_bitwise_cloneable``, which checks whether a type can be safely
+  copied by memcpy/memmove.
+
New Compiler Flags
------------------
- ``-fsanitize=implicit-bitfield-conversion`` checks implicit truncation and
diff --git a/clang/include/clang/AST/OpenACCClause.h b/clang/include/clang/AST/OpenACCClause.h
index a4c82cd..ea1ffbc 100644
--- a/clang/include/clang/AST/OpenACCClause.h
+++ b/clang/include/clang/AST/OpenACCClause.h
@@ -867,7 +867,7 @@ public:
case OpenACCClauseKind::CLAUSE_NAME: \
Visit##CLAUSE_NAME##Clause(*cast<OpenACC##CLAUSE_NAME##Clause>(C)); \
return;
-#define CLAUSE_ALIAS(ALIAS_NAME, CLAUSE_NAME) \
+#define CLAUSE_ALIAS(ALIAS_NAME, CLAUSE_NAME, DEPRECATED) \
case OpenACCClauseKind::ALIAS_NAME: \
Visit##CLAUSE_NAME##Clause(*cast<OpenACC##CLAUSE_NAME##Clause>(C)); \
return;
diff --git a/clang/include/clang/AST/Type.h b/clang/include/clang/AST/Type.h
index 263b632df..9eb3f6c 100644
--- a/clang/include/clang/AST/Type.h
+++ b/clang/include/clang/AST/Type.h
@@ -1120,6 +1120,20 @@ public:
/// Return true if this is a trivially copyable type (C++0x [basic.types]p9)
bool isTriviallyCopyableType(const ASTContext &Context) const;
+ /// Return true if the type is safe to bitwise copy using memcpy/memmove.
+ ///
+ /// This is an extension in clang: bitwise cloneable types act as trivially
+ /// copyable types, meaning their underlying bytes can be safely copied by
+ /// memcpy or memmove. After the copy, the destination object has the same
+ /// object representation.
+ ///
+ /// However, there are cases where it is not safe to copy:
+ /// - When sanitizers, such as AddressSanitizer, add padding with poison,
+ /// which can cause issues if those poisoned padding bits are accessed.
+ /// - Types with Objective-C lifetimes, where specific runtime
+ /// semantics may not be preserved during a bitwise copy.
+ bool isBitwiseCloneableType(const ASTContext &Context) const;
+
/// Return true if this is a trivially copyable type
bool isTriviallyCopyConstructibleType(const ASTContext &Context) const;
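
To make the relationship to the existing trait concrete, a minimal sketch (my example; the superset relationship follows from the documentation above):

.. code-block:: c++

  struct Poly { virtual ~Poly(); };

  // Trivially copyable implies bitwise cloneable, but not the other way
  // around: polymorphic types are cloneable even though they are not
  // trivially copyable.
  static_assert(!__is_trivially_copyable(Poly), "");
  static_assert(__is_bitwise_cloneable(Poly), "");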
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def
index 433c779..9e6800e 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -240,7 +240,7 @@ TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_v2bf16, "V2sV2s*0V2s", "t", "at
TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_v2bf16, "V2sV2s*1V2s", "t", "atomic-global-pk-add-bf16-inst")
TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_v2bf16, "V2sV2s*3V2s", "t", "atomic-ds-pk-add-16-insts")
TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_v2f16, "V2hV2h*3V2h", "t", "atomic-ds-pk-add-16-insts")
-TARGET_BUILTIN(__builtin_amdgcn_global_load_lds, "vv*1v*3UiiUi", "t", "gfx940-insts")
+TARGET_BUILTIN(__builtin_amdgcn_global_load_lds, "vv*1v*3IUiIiIUi", "t", "gfx940-insts")
//===----------------------------------------------------------------------===//
// Deep learning builtins.
diff --git a/clang/include/clang/Basic/Cuda.h b/clang/include/clang/Basic/Cuda.h
index d15171d..0d5e38e 100644
--- a/clang/include/clang/Basic/Cuda.h
+++ b/clang/include/clang/Basic/Cuda.h
@@ -124,6 +124,7 @@ enum class CudaArch {
GFX1103,
GFX1150,
GFX1151,
+ GFX1152,
GFX12_GENERIC,
GFX1200,
GFX1201,
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 8774514..9f0b6f5 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -10082,6 +10082,12 @@ def warn_new_dangling_initializer_list : Warning<
"the allocated initializer list}0 "
"will be destroyed at the end of the full-expression">,
InGroup<DanglingInitializerList>;
+def warn_unsupported_lifetime_extension : Warning<
+ "lifetime extension of "
+ "%select{temporary|backing array of initializer list}0 created "
+ "by aggregate initialization using a default member initializer "
+ "is not yet supported; lifetime of %select{temporary|backing array}0 "
+ "will end at the end of the full-expression">, InGroup<Dangling>;
// For non-floating point, expressions of the form x == x or x != x
// should result in a warning, since these always evaluate to a constant.
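
An illustrative sketch of the pattern the new warning diagnoses (my example; the temporary bound by the default member initializer is not lifetime-extended through aggregate initialization):

.. code-block:: c++

  struct A { int i; };
  struct B { const A &a = A{1}; };  // default member initializer binds a temporary

  void f() {
    B b{};  // aggregate init uses the initializer; the A{1} temporary dies at
            // the end of the full-expression, so b.a dangles -> new warning
  }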
diff --git a/clang/include/clang/Basic/OpenACCClauses.def b/clang/include/clang/Basic/OpenACCClauses.def
index 53f4cd1..85f4859 100644
--- a/clang/include/clang/Basic/OpenACCClauses.def
+++ b/clang/include/clang/Basic/OpenACCClauses.def
@@ -15,31 +15,31 @@
//
// VISIT_CLAUSE(CLAUSE_NAME)
//
-// CLAUSE_ALIAS(ALIAS_NAME, CLAUSE_NAME)
+// CLAUSE_ALIAS(ALIAS_NAME, CLAUSE_NAME, DEPRECATED)
#ifndef CLAUSE_ALIAS
-#define CLAUSE_ALIAS(ALIAS_NAME, CLAUSE_NAME)
+#define CLAUSE_ALIAS(ALIAS_NAME, CLAUSE_NAME, DEPRECATED)
#endif
VISIT_CLAUSE(Auto)
VISIT_CLAUSE(Async)
VISIT_CLAUSE(Attach)
VISIT_CLAUSE(Copy)
-CLAUSE_ALIAS(PCopy, Copy)
-CLAUSE_ALIAS(PresentOrCopy, Copy)
+CLAUSE_ALIAS(PCopy, Copy, true)
+CLAUSE_ALIAS(PresentOrCopy, Copy, true)
VISIT_CLAUSE(CopyIn)
-CLAUSE_ALIAS(PCopyIn, CopyIn)
-CLAUSE_ALIAS(PresentOrCopyIn, CopyIn)
+CLAUSE_ALIAS(PCopyIn, CopyIn, true)
+CLAUSE_ALIAS(PresentOrCopyIn, CopyIn, true)
VISIT_CLAUSE(CopyOut)
-CLAUSE_ALIAS(PCopyOut, CopyOut)
-CLAUSE_ALIAS(PresentOrCopyOut, CopyOut)
+CLAUSE_ALIAS(PCopyOut, CopyOut, true)
+CLAUSE_ALIAS(PresentOrCopyOut, CopyOut, true)
VISIT_CLAUSE(Create)
-CLAUSE_ALIAS(PCreate, Create)
-CLAUSE_ALIAS(PresentOrCreate, Create)
+CLAUSE_ALIAS(PCreate, Create, true)
+CLAUSE_ALIAS(PresentOrCreate, Create, true)
VISIT_CLAUSE(Default)
VISIT_CLAUSE(DevicePtr)
VISIT_CLAUSE(DeviceType)
-CLAUSE_ALIAS(DType, DeviceType)
+CLAUSE_ALIAS(DType, DeviceType, false)
VISIT_CLAUSE(FirstPrivate)
VISIT_CLAUSE(If)
VISIT_CLAUSE(Independent)
diff --git a/clang/include/clang/Basic/TokenKinds.def b/clang/include/clang/Basic/TokenKinds.def
index b5a0e9d..9c4b174 100644
--- a/clang/include/clang/Basic/TokenKinds.def
+++ b/clang/include/clang/Basic/TokenKinds.def
@@ -542,6 +542,8 @@ TYPE_TRAIT_2(__reference_converts_from_temporary, ReferenceConvertsFromTemporary
// is not exposed to users.
TYPE_TRAIT_2(/*EmptySpellingName*/, IsDeducible, KEYCXX)
+TYPE_TRAIT_1(__is_bitwise_cloneable, IsBitwiseCloneable, KEYALL)
+
// Embarcadero Expression Traits
EXPRESSION_TRAIT(__is_lvalue_expr, IsLValueExpr, KEYCXX)
EXPRESSION_TRAIT(__is_rvalue_expr, IsRValueExpr, KEYCXX)
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index cca4367..a0820e2 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2637,7 +2637,8 @@ let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vbrev : RVVOutBuiltinSetZvbb;
defm vclz : RVVOutBuiltinSetZvbb;
defm vctz : RVVOutBuiltinSetZvbb;
- defm vcpopv : RVVOutBuiltinSetZvbb;
+ let IRName = "vcpopv", MaskedIRName = "vcpopv_mask" in
+ defm vcpop : RVVOutBuiltinSetZvbb;
let OverloadedName = "vwsll" in
defm vwsll : RVVSignedWidenBinBuiltinSetVwsll;
}
diff --git a/clang/include/clang/Lex/DependencyDirectivesScanner.h b/clang/include/clang/Lex/DependencyDirectivesScanner.h
index 2f8354d..0e11590 100644
--- a/clang/include/clang/Lex/DependencyDirectivesScanner.h
+++ b/clang/include/clang/Lex/DependencyDirectivesScanner.h
@@ -17,7 +17,6 @@
#ifndef LLVM_CLANG_LEX_DEPENDENCYDIRECTIVESSCANNER_H
#define LLVM_CLANG_LEX_DEPENDENCYDIRECTIVESSCANNER_H
-#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
@@ -118,7 +117,7 @@ struct Directive {
bool scanSourceForDependencyDirectives(
StringRef Input, SmallVectorImpl<dependency_directives_scan::Token> &Tokens,
SmallVectorImpl<dependency_directives_scan::Directive> &Directives,
- const LangOptions &LangOpts, DiagnosticsEngine *Diags = nullptr,
+ DiagnosticsEngine *Diags = nullptr,
SourceLocation InputSourceLoc = SourceLocation());
/// Print the previously scanned dependency directives as minimized source text.
diff --git a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h
index 9dc2006..f7b4510 100644
--- a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h
+++ b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h
@@ -363,8 +363,7 @@ public:
///
/// Returns true if the directive tokens are populated for this file entry,
/// false if not (i.e. this entry is not a file or its scan fails).
- bool ensureDirectiveTokensArePopulated(EntryRef Entry,
- const LangOptions &LangOpts);
+ bool ensureDirectiveTokensArePopulated(EntryRef Entry);
/// Check whether \p Path exists. By default checks cached result of \c
/// status(), and falls back on FS if unable to do so.
diff --git a/clang/lib/AST/CMakeLists.txt b/clang/lib/AST/CMakeLists.txt
index 3faefb5..a5d3dac 100644
--- a/clang/lib/AST/CMakeLists.txt
+++ b/clang/lib/AST/CMakeLists.txt
@@ -87,6 +87,7 @@ add_clang_library(clangAST
Interp/Record.cpp
Interp/Source.cpp
Interp/State.cpp
+ Interp/MemberPointer.cpp
Interp/InterpShared.cpp
ItaniumCXXABI.cpp
ItaniumMangle.cpp
diff --git a/clang/lib/AST/Interp/ByteCodeExprGen.cpp b/clang/lib/AST/Interp/ByteCodeExprGen.cpp
index 3671c41..d124248 100644
--- a/clang/lib/AST/Interp/ByteCodeExprGen.cpp
+++ b/clang/lib/AST/Interp/ByteCodeExprGen.cpp
@@ -100,6 +100,35 @@ bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
return this->emitMemcpy(CE);
}
+ case CK_DerivedToBaseMemberPointer: {
+ assert(classifyPrim(CE->getType()) == PT_MemberPtr);
+ assert(classifyPrim(SubExpr->getType()) == PT_MemberPtr);
+ const auto *FromMP = SubExpr->getType()->getAs<MemberPointerType>();
+ const auto *ToMP = CE->getType()->getAs<MemberPointerType>();
+
+ unsigned DerivedOffset = collectBaseOffset(QualType(ToMP->getClass(), 0),
+ QualType(FromMP->getClass(), 0));
+
+ if (!this->visit(SubExpr))
+ return false;
+
+ return this->emitGetMemberPtrBasePop(DerivedOffset, CE);
+ }
+
+ case CK_BaseToDerivedMemberPointer: {
+ assert(classifyPrim(CE) == PT_MemberPtr);
+ assert(classifyPrim(SubExpr) == PT_MemberPtr);
+ const auto *FromMP = SubExpr->getType()->getAs<MemberPointerType>();
+ const auto *ToMP = CE->getType()->getAs<MemberPointerType>();
+
+ unsigned DerivedOffset = collectBaseOffset(QualType(FromMP->getClass(), 0),
+ QualType(ToMP->getClass(), 0));
+
+ if (!this->visit(SubExpr))
+ return false;
+ return this->emitGetMemberPtrBasePop(-DerivedOffset, CE);
+ }
+
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
if (!this->visit(SubExpr))
@@ -187,7 +216,8 @@ bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
return this->emitCastFloatingIntegral(*ToT, CE);
}
- case CK_NullToPointer: {
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer: {
if (DiscardResult)
return true;
@@ -326,7 +356,8 @@ bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
return this->emitCast(*FromT, *ToT, CE);
}
- case CK_PointerToBoolean: {
+ case CK_PointerToBoolean:
+ case CK_MemberPointerToBoolean: {
PrimType PtrT = classifyPrim(SubExpr->getType());
// Just emit p != nullptr for this.
@@ -534,8 +565,23 @@ bool ByteCodeExprGen<Emitter>::VisitBinaryOperator(const BinaryOperator *BO) {
BO->isComparisonOp())
return this->emitComplexComparison(LHS, RHS, BO);
- if (BO->isPtrMemOp())
- return this->visit(RHS);
+ if (BO->isPtrMemOp()) {
+ if (!this->visit(LHS))
+ return false;
+
+ if (!this->visit(RHS))
+ return false;
+
+ if (!this->emitToMemberPtr(BO))
+ return false;
+
+ if (classifyPrim(BO) == PT_MemberPtr)
+ return true;
+
+ if (!this->emitCastMemberPtrPtr(BO))
+ return false;
+ return DiscardResult ? this->emitPopPtr(BO) : true;
+ }
// Typecheck the args.
std::optional<PrimType> LT = classify(LHS->getType());
@@ -2773,6 +2819,8 @@ bool ByteCodeExprGen<Emitter>::visitZeroInitializer(PrimType T, QualType QT,
return this->emitNullPtr(nullptr, E);
case PT_FnPtr:
return this->emitNullFnPtr(nullptr, E);
+ case PT_MemberPtr:
+ return this->emitNullMemberPtr(nullptr, E);
case PT_Float: {
return this->emitConstFloat(APFloat::getZero(Ctx.getFloatSemantics(QT)), E);
}
@@ -2875,6 +2923,7 @@ bool ByteCodeExprGen<Emitter>::emitConst(T Value, PrimType Ty, const Expr *E) {
return this->emitConstBool(Value, E);
case PT_Ptr:
case PT_FnPtr:
+ case PT_MemberPtr:
case PT_Float:
case PT_IntAP:
case PT_IntAPS:
@@ -3188,7 +3237,7 @@ bool ByteCodeExprGen<Emitter>::visitAPValueInitializer(const APValue &Val,
const APValue &F = Val.getStructField(I);
const Record::Field *RF = R->getField(I);
- if (F.isInt()) {
+ if (F.isInt() || F.isLValue()) {
PrimType T = classifyPrim(RF->Decl->getType());
if (!this->visitAPValue(F, T, E))
return false;
@@ -3308,10 +3357,27 @@ bool ByteCodeExprGen<Emitter>::VisitCallExpr(const CallExpr *E) {
}
}
+ std::optional<unsigned> CalleeOffset;
// Add the (optional, implicit) This pointer.
if (const auto *MC = dyn_cast<CXXMemberCallExpr>(E)) {
- if (!this->visit(MC->getImplicitObjectArgument()))
+ if (!FuncDecl && classifyPrim(E->getCallee()) == PT_MemberPtr) {
+ // If we end up creating a CallPtr op for this, we need the base of the
+ // member pointer as the instance pointer, and later extract the function
+ // decl as the function pointer.
+ const Expr *Callee = E->getCallee();
+ CalleeOffset =
+ this->allocateLocalPrimitive(Callee, PT_MemberPtr, true, false);
+ if (!this->visit(Callee))
+ return false;
+ if (!this->emitSetLocal(PT_MemberPtr, *CalleeOffset, E))
+ return false;
+ if (!this->emitGetLocal(PT_MemberPtr, *CalleeOffset, E))
+ return false;
+ if (!this->emitGetMemberPtrBase(E))
+ return false;
+ } else if (!this->visit(MC->getImplicitObjectArgument())) {
return false;
+ }
}
llvm::BitVector NonNullArgs = collectNonNullArgs(FuncDecl, Args);
@@ -3380,11 +3446,22 @@ bool ByteCodeExprGen<Emitter>::VisitCallExpr(const CallExpr *E) {
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
ArgSize += align(primSize(classify(E->getArg(I)).value_or(PT_Ptr)));
- if (!this->visit(E->getCallee()))
- return false;
+ // Get the callee, either from a member pointer saved in CalleeOffset,
+ // or by just visiting the Callee expr.
+ if (CalleeOffset) {
+ if (!this->emitGetLocal(PT_MemberPtr, *CalleeOffset, E))
+ return false;
+ if (!this->emitGetMemberPtrDecl(E))
+ return false;
+ if (!this->emitCallPtr(ArgSize, E, E))
+ return false;
+ } else {
+ if (!this->visit(E->getCallee()))
+ return false;
- if (!this->emitCallPtr(ArgSize, E, E))
- return false;
+ if (!this->emitCallPtr(ArgSize, E, E))
+ return false;
+ }
}
// Cleanup for discarded return values.
@@ -3623,6 +3700,11 @@ bool ByteCodeExprGen<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
return false;
return DiscardResult ? this->emitPop(*T, E) : true;
case UO_AddrOf: // &x
+ if (E->getType()->isMemberPointerType()) {
+ // C++11 [expr.unary.op]p3 has very strict rules on how the address of a
+ // member can be formed.
+ return this->emitGetMemberPtr(cast<DeclRefExpr>(SubExpr)->getDecl(), E);
+ }
// We should already have a pointer when we get here.
return this->delegate(SubExpr);
case UO_Deref: // *x
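
Taken together, the member-pointer support added above covers forming member pointers, base/derived conversions, the ``.*``/``->*`` operators, and calls through pointers to member functions. A small constexpr sketch of what this enables in the bytecode interpreter (my example):

.. code-block:: c++

  struct A { int a = 1; constexpr int get() const { return a; } };
  struct B : A { int b = 2; };

  constexpr int A::*pa = &A::a;
  constexpr int B::*pb = pa;                // base-to-derived member pointer cast
  constexpr int (A::*pf)() const = &A::get;

  constexpr B obj;
  static_assert(obj.*pb == 1, "");          // ptr-mem operator on a data member
  static_assert((obj.*pf)() == 1, "");      // call through a member function pointer
  static_assert(static_cast<int A::*>(pb) == pa, "");  // derived-to-base cast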
diff --git a/clang/lib/AST/Interp/Context.cpp b/clang/lib/AST/Interp/Context.cpp
index b0b22b0..98d1837 100644
--- a/clang/lib/AST/Interp/Context.cpp
+++ b/clang/lib/AST/Interp/Context.cpp
@@ -163,8 +163,12 @@ std::optional<PrimType> Context::classify(QualType T) const {
if (T->isFloatingType())
return PT_Float;
+ if (T->isSpecificBuiltinType(BuiltinType::BoundMember) ||
+ T->isMemberPointerType())
+ return PT_MemberPtr;
+
if (T->isFunctionPointerType() || T->isFunctionReferenceType() ||
- T->isFunctionType() || T->isSpecificBuiltinType(BuiltinType::BoundMember))
+ T->isFunctionType())
return PT_FnPtr;
if (T->isReferenceType() || T->isPointerType() ||
@@ -177,9 +181,6 @@ std::optional<PrimType> Context::classify(QualType T) const {
if (const auto *DT = dyn_cast<DecltypeType>(T))
return classify(DT->getUnderlyingType());
- if (const auto *DT = dyn_cast<MemberPointerType>(T))
- return classify(DT->getPointeeType());
-
return std::nullopt;
}
@@ -292,10 +293,12 @@ unsigned Context::collectBaseOffset(const RecordDecl *BaseDecl,
}
if (CurDecl == FinalDecl)
break;
-
- // break;
}
assert(OffsetSum > 0);
return OffsetSum;
}
+
+const Record *Context::getRecord(const RecordDecl *D) const {
+ return P->getOrCreateRecord(D);
+}
diff --git a/clang/lib/AST/Interp/Context.h b/clang/lib/AST/Interp/Context.h
index 360e949..c78dc9a 100644
--- a/clang/lib/AST/Interp/Context.h
+++ b/clang/lib/AST/Interp/Context.h
@@ -107,6 +107,8 @@ public:
unsigned collectBaseOffset(const RecordDecl *BaseDecl,
const RecordDecl *DerivedDecl) const;
+ const Record *getRecord(const RecordDecl *D) const;
+
private:
/// Runs a function.
bool Run(State &Parent, const Function *Func, APValue &Result);
diff --git a/clang/lib/AST/Interp/Descriptor.cpp b/clang/lib/AST/Interp/Descriptor.cpp
index 746b765..d20ab13 100644
--- a/clang/lib/AST/Interp/Descriptor.cpp
+++ b/clang/lib/AST/Interp/Descriptor.cpp
@@ -11,6 +11,7 @@
#include "Floating.h"
#include "FunctionPointer.h"
#include "IntegralAP.h"
+#include "MemberPointer.h"
#include "Pointer.h"
#include "PrimType.h"
#include "Record.h"
diff --git a/clang/lib/AST/Interp/Disasm.cpp b/clang/lib/AST/Interp/Disasm.cpp
index 3f8a92e..0ab84d1 100644
--- a/clang/lib/AST/Interp/Disasm.cpp
+++ b/clang/lib/AST/Interp/Disasm.cpp
@@ -19,6 +19,7 @@
#include "Integral.h"
#include "IntegralAP.h"
#include "InterpFrame.h"
+#include "MemberPointer.h"
#include "Opcode.h"
#include "PrimType.h"
#include "Program.h"
@@ -122,6 +123,8 @@ static const char *primTypeToString(PrimType T) {
return "Ptr";
case PT_FnPtr:
return "FnPtr";
+ case PT_MemberPtr:
+ return "MemberPtr";
}
llvm_unreachable("Unhandled PrimType");
}
diff --git a/clang/lib/AST/Interp/Function.cpp b/clang/lib/AST/Interp/Function.cpp
index 1d04998..00f5a1f 100644
--- a/clang/lib/AST/Interp/Function.cpp
+++ b/clang/lib/AST/Interp/Function.cpp
@@ -40,7 +40,8 @@ SourceInfo Function::getSource(CodePtr PC) const {
unsigned Offset = PC - getCodeBegin();
using Elem = std::pair<unsigned, SourceInfo>;
auto It = llvm::lower_bound(SrcMap, Elem{Offset, {}}, llvm::less_first());
- assert(It != SrcMap.end());
+ if (It == SrcMap.end())
+ return SrcMap.back().second;
return It->second;
}
diff --git a/clang/lib/AST/Interp/Interp.cpp b/clang/lib/AST/Interp/Interp.cpp
index 145fa65..49015b1 100644
--- a/clang/lib/AST/Interp/Interp.cpp
+++ b/clang/lib/AST/Interp/Interp.cpp
@@ -373,6 +373,26 @@ bool CheckSubobject(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
return false;
}
+bool CheckDowncast(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ uint32_t Offset) {
+ uint32_t MinOffset = Ptr.getDeclDesc()->getMetadataSize();
+ uint32_t PtrOffset = Ptr.getByteOffset();
+
+ // We subtract Offset from PtrOffset. The result must be at least
+ // MinOffset.
+ if (Offset < PtrOffset && (PtrOffset - Offset) >= MinOffset)
+ return true;
+
+ const auto *E = cast<CastExpr>(S.Current->getExpr(OpPC));
+ QualType TargetQT = E->getType()->getPointeeType();
+ QualType MostDerivedQT = Ptr.getDeclPtr().getType();
+
+ S.CCEDiag(E, diag::note_constexpr_invalid_downcast)
+ << MostDerivedQT << TargetQT;
+
+ return false;
+}
+
bool CheckConst(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
assert(Ptr.isLive() && "Pointer is not live");
if (!Ptr.isConst())
@@ -493,10 +513,12 @@ bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
bool CheckInvoke(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
if (!CheckLive(S, OpPC, Ptr, AK_MemberCall))
return false;
- if (!CheckExtern(S, OpPC, Ptr))
- return false;
- if (!CheckRange(S, OpPC, Ptr, AK_MemberCall))
- return false;
+ if (!Ptr.isDummy()) {
+ if (!CheckExtern(S, OpPC, Ptr))
+ return false;
+ if (!CheckRange(S, OpPC, Ptr, AK_MemberCall))
+ return false;
+ }
return true;
}
@@ -516,7 +538,7 @@ bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F) {
return false;
}
- if (!F->isConstexpr()) {
+ if (!F->isConstexpr() || !F->hasBody()) {
const SourceLocation &Loc = S.Current->getLocation(OpPC);
if (S.getLangOpts().CPlusPlus11) {
const FunctionDecl *DiagDecl = F->getDecl();
@@ -550,9 +572,10 @@ bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F) {
S.checkingPotentialConstantExpression())
return false;
- // If the declaration is defined _and_ declared 'constexpr', the below
- // diagnostic doesn't add anything useful.
- if (DiagDecl->isDefined() && DiagDecl->isConstexpr())
+ // If the declaration is defined, declared 'constexpr' _and_ has a body,
+ // the below diagnostic doesn't add anything useful.
+ if (DiagDecl->isDefined() && DiagDecl->isConstexpr() &&
+ DiagDecl->hasBody())
return false;
S.FFDiag(Loc, diag::note_constexpr_invalid_function, 1)
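
The new CheckDowncast guards derived-to-base offset arithmetic: if subtracting the offset would escape the most-derived object, the cast is diagnosed rather than computed. A sketch of the rejected pattern (my example):

.. code-block:: c++

  struct Base { int x; };
  struct Derived : Base { int y; };

  constexpr Base b{1};
  // Not a constant expression: b's most-derived type is Base, so the
  // downcast to Derived is invalid and is now diagnosed
  // (note_constexpr_invalid_downcast) instead of silently miscomputing.
  constexpr const Derived &d = static_cast<const Derived &>(b);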
diff --git a/clang/lib/AST/Interp/Interp.h b/clang/lib/AST/Interp/Interp.h
index eca1792..98caea5 100644
--- a/clang/lib/AST/Interp/Interp.h
+++ b/clang/lib/AST/Interp/Interp.h
@@ -20,6 +20,7 @@
#include "InterpFrame.h"
#include "InterpStack.h"
#include "InterpState.h"
+#include "MemberPointer.h"
#include "Opcode.h"
#include "PrimType.h"
#include "Program.h"
@@ -75,6 +76,11 @@ bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
bool CheckSubobject(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
CheckSubobjectKind CSK);
+/// Checks if the downcast using the given offset is possible with the given
+/// pointer.
+bool CheckDowncast(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ uint32_t Offset);
+
/// Checks if a pointer points to const storage.
bool CheckConst(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
@@ -725,6 +731,9 @@ using CompareFn = llvm::function_ref<bool(ComparisonCategoryResult)>;
template <typename T>
bool CmpHelper(InterpState &S, CodePtr OpPC, CompareFn Fn) {
+ assert((!std::is_same_v<T, MemberPointer>) &&
+ "Non-equality comparisons on member pointer types should already be "
+ "rejected in Sema.");
using BoolT = PrimConv<PT_Bool>::T;
const T &RHS = S.Stk.pop<T>();
const T &LHS = S.Stk.pop<T>();
@@ -834,6 +843,47 @@ inline bool CmpHelperEQ<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) {
}
}
+template <>
+inline bool CmpHelperEQ<MemberPointer>(InterpState &S, CodePtr OpPC,
+ CompareFn Fn) {
+ const auto &RHS = S.Stk.pop<MemberPointer>();
+ const auto &LHS = S.Stk.pop<MemberPointer>();
+
+ // If either operand is a pointer to a weak function, the comparison is not
+ // constant.
+ for (const auto &MP : {LHS, RHS}) {
+ if (const CXXMethodDecl *MD = MP.getMemberFunction(); MD && MD->isWeak()) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_mem_pointer_weak_comparison) << MD;
+ return false;
+ }
+ }
+
+ // C++11 [expr.eq]p2:
+ // If both operands are null, they compare equal. Otherwise if only one is
+ // null, they compare unequal.
+ if (LHS.isZero() && RHS.isZero()) {
+ S.Stk.push<Boolean>(Fn(ComparisonCategoryResult::Equal));
+ return true;
+ }
+ if (LHS.isZero() || RHS.isZero()) {
+ S.Stk.push<Boolean>(Fn(ComparisonCategoryResult::Unordered));
+ return true;
+ }
+
+ // We cannot compare against virtual declarations at compile time.
+ for (const auto &MP : {LHS, RHS}) {
+ if (const CXXMethodDecl *MD = MP.getMemberFunction();
+ MD && MD->isVirtual()) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.CCEDiag(Loc, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
+ }
+ }
+
+ S.Stk.push<Boolean>(Boolean::from(Fn(LHS.compare(RHS))));
+ return true;
+}
+
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool EQ(InterpState &S, CodePtr OpPC) {
return CmpHelperEQ<T>(S, OpPC, [](ComparisonCategoryResult R) {
@@ -1300,6 +1350,9 @@ inline bool GetPtrDerivedPop(InterpState &S, CodePtr OpPC, uint32_t Off) {
return false;
if (!CheckSubobject(S, OpPC, Ptr, CSK_Derived))
return false;
+ if (!CheckDowncast(S, OpPC, Ptr, Off))
+ return false;
+
S.Stk.push<Pointer>(Ptr.atFieldSub(Off));
return true;
}
@@ -1324,6 +1377,12 @@ inline bool GetPtrBasePop(InterpState &S, CodePtr OpPC, uint32_t Off) {
return true;
}
+inline bool GetMemberPtrBasePop(InterpState &S, CodePtr OpPC, int32_t Off) {
+ const auto &Ptr = S.Stk.pop<MemberPointer>();
+ S.Stk.push<MemberPointer>(Ptr.atInstanceBase(Off));
+ return true;
+}
+
inline bool GetPtrThisBase(InterpState &S, CodePtr OpPC, uint32_t Off) {
if (S.checkingPotentialConstantExpression())
return false;
@@ -1532,6 +1591,24 @@ inline bool Memcpy(InterpState &S, CodePtr OpPC) {
return DoMemcpy(S, OpPC, Src, Dest);
}
+inline bool ToMemberPtr(InterpState &S, CodePtr OpPC) {
+ const auto &Member = S.Stk.pop<MemberPointer>();
+ const auto &Base = S.Stk.pop<Pointer>();
+
+ S.Stk.push<MemberPointer>(Member.takeInstance(Base));
+ return true;
+}
+
+inline bool CastMemberPtrPtr(InterpState &S, CodePtr OpPC) {
+ const auto &MP = S.Stk.pop<MemberPointer>();
+
+ if (std::optional<Pointer> Ptr = MP.toPointer(S.Ctx)) {
+ S.Stk.push<Pointer>(*Ptr);
+ return true;
+ }
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// AddOffset, SubOffset
//===----------------------------------------------------------------------===//
@@ -1696,8 +1773,10 @@ inline bool SubPtr(InterpState &S, CodePtr OpPC) {
return true;
}
- T A = T::from(LHS.getIndex());
- T B = T::from(RHS.getIndex());
+ T A = LHS.isElementPastEnd() ? T::from(LHS.getNumElems())
+ : T::from(LHS.getIndex());
+ T B = RHS.isElementPastEnd() ? T::from(RHS.getNumElems())
+ : T::from(RHS.getIndex());
return AddSubMulHelper<T, T::sub, std::minus>(S, OpPC, A.bitWidth(), A, B);
}
@@ -2115,7 +2194,7 @@ inline bool ArrayDecay(InterpState &S, CodePtr OpPC) {
if (!CheckRange(S, OpPC, Ptr, CSK_ArrayToPointer))
return false;
- if (!Ptr.isUnknownSizeArray() || Ptr.isDummy()) {
+ if (Ptr.isRoot() || !Ptr.isUnknownSizeArray() || Ptr.isDummy()) {
S.Stk.push<Pointer>(Ptr.atIndex(0));
return true;
}
@@ -2329,6 +2408,28 @@ inline bool GetIntPtr(InterpState &S, CodePtr OpPC, const Descriptor *Desc) {
return true;
}
+inline bool GetMemberPtr(InterpState &S, CodePtr OpPC, const Decl *D) {
+ S.Stk.push<MemberPointer>(D);
+ return true;
+}
+
+inline bool GetMemberPtrBase(InterpState &S, CodePtr OpPC) {
+ const auto &MP = S.Stk.pop<MemberPointer>();
+
+ S.Stk.push<Pointer>(MP.getBase());
+ return true;
+}
+
+inline bool GetMemberPtrDecl(InterpState &S, CodePtr OpPC) {
+ const auto &MP = S.Stk.pop<MemberPointer>();
+
+ const auto *FD = cast<FunctionDecl>(MP.getDecl());
+ const auto *Func = S.getContext().getOrCreateFunction(FD);
+
+ S.Stk.push<FunctionPointer>(Func);
+ return true;
+}
+
/// Just emit a diagnostic. The expression that caused emission of this
/// op is not valid in a constant context.
inline bool Invalid(InterpState &S, CodePtr OpPC) {
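
The equality rules implemented in CmpHelperEQ<MemberPointer> follow C++11 [expr.eq]p2, as cited in the code. A small sketch (my example):

.. code-block:: c++

  struct S { int a; int b; };

  constexpr int S::*null_mp = nullptr;
  static_assert(null_mp == nullptr, "");  // both null: compare equal
  static_assert(&S::a != null_mp, "");    // exactly one null: compare unequal
  static_assert(&S::a == &S::a, "");
  static_assert(&S::a != &S::b, "");      // distinct members: unequal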
diff --git a/clang/lib/AST/Interp/InterpFrame.cpp b/clang/lib/AST/Interp/InterpFrame.cpp
index 51b0bd5..54ccf90 100644
--- a/clang/lib/AST/Interp/InterpFrame.cpp
+++ b/clang/lib/AST/Interp/InterpFrame.cpp
@@ -12,6 +12,7 @@
#include "Function.h"
#include "InterpStack.h"
#include "InterpState.h"
+#include "MemberPointer.h"
#include "Pointer.h"
#include "PrimType.h"
#include "Program.h"
diff --git a/clang/lib/AST/Interp/InterpStack.cpp b/clang/lib/AST/Interp/InterpStack.cpp
index 91fe40f..c702474 100644
--- a/clang/lib/AST/Interp/InterpStack.cpp
+++ b/clang/lib/AST/Interp/InterpStack.cpp
@@ -10,6 +10,7 @@
#include "Boolean.h"
#include "Floating.h"
#include "Integral.h"
+#include "MemberPointer.h"
#include "Pointer.h"
#include <cassert>
#include <cstdlib>
diff --git a/clang/lib/AST/Interp/InterpStack.h b/clang/lib/AST/Interp/InterpStack.h
index 3fd0f63..9d85503 100644
--- a/clang/lib/AST/Interp/InterpStack.h
+++ b/clang/lib/AST/Interp/InterpStack.h
@@ -15,6 +15,7 @@
#include "FunctionPointer.h"
#include "IntegralAP.h"
+#include "MemberPointer.h"
#include "PrimType.h"
#include <memory>
#include <vector>
@@ -188,6 +189,8 @@ private:
return PT_IntAP;
else if constexpr (std::is_same_v<T, IntegralAP<false>>)
return PT_IntAP;
+ else if constexpr (std::is_same_v<T, MemberPointer>)
+ return PT_MemberPtr;
llvm_unreachable("unknown type push()'ed into InterpStack");
}
diff --git a/clang/lib/AST/Interp/MemberPointer.cpp b/clang/lib/AST/Interp/MemberPointer.cpp
new file mode 100644
index 0000000..96f6364
--- /dev/null
+++ b/clang/lib/AST/Interp/MemberPointer.cpp
@@ -0,0 +1,76 @@
+//===------------------------- MemberPointer.cpp ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "MemberPointer.h"
+#include "Context.h"
+#include "FunctionPointer.h"
+#include "Program.h"
+#include "Record.h"
+
+namespace clang {
+namespace interp {
+
+std::optional<Pointer> MemberPointer::toPointer(const Context &Ctx) const {
+ if (!Dcl || isa<FunctionDecl>(Dcl))
+ return Base;
+ const FieldDecl *FD = cast<FieldDecl>(Dcl);
+ assert(FD);
+
+ if (!Base.isBlockPointer())
+ return std::nullopt;
+
+ Pointer CastedBase =
+ (PtrOffset < 0 ? Base.atField(-PtrOffset) : Base.atFieldSub(PtrOffset));
+
+ const Record *BaseRecord = CastedBase.getRecord();
+ if (!BaseRecord)
+ return std::nullopt;
+
+ assert(BaseRecord);
+ if (FD->getParent() == BaseRecord->getDecl())
+ return CastedBase.atField(BaseRecord->getField(FD)->Offset);
+
+ const RecordDecl *FieldParent = FD->getParent();
+ const Record *FieldRecord = Ctx.getRecord(FieldParent);
+
+ unsigned Offset = 0;
+ Offset += FieldRecord->getField(FD)->Offset;
+ Offset += CastedBase.block()->getDescriptor()->getMetadataSize();
+
+ if (Offset > CastedBase.block()->getSize())
+ return std::nullopt;
+
+ if (const RecordDecl *BaseDecl = Base.getDeclPtr().getRecord()->getDecl();
+ BaseDecl != FieldParent)
+ Offset += Ctx.collectBaseOffset(FieldParent, BaseDecl);
+
+ if (Offset > CastedBase.block()->getSize())
+ return std::nullopt;
+
+ assert(Offset <= CastedBase.block()->getSize());
+ return Pointer(const_cast<Block *>(Base.block()), Offset, Offset);
+}
+
+FunctionPointer MemberPointer::toFunctionPointer(const Context &Ctx) const {
+ return FunctionPointer(Ctx.getProgram().getFunction(cast<FunctionDecl>(Dcl)));
+}
+
+APValue MemberPointer::toAPValue() const {
+ if (isZero())
+ return APValue(static_cast<ValueDecl *>(nullptr), /*IsDerivedMember=*/false,
+ /*Path=*/{});
+
+ if (hasBase())
+ return Base.toAPValue();
+
+ return APValue(cast<ValueDecl>(getDecl()), /*IsDerivedMember=*/false,
+ /*Path=*/{});
+}
+
+} // namespace interp
+} // namespace clang
diff --git a/clang/lib/AST/Interp/MemberPointer.h b/clang/lib/AST/Interp/MemberPointer.h
new file mode 100644
index 0000000..5c61f6a
--- /dev/null
+++ b/clang/lib/AST/Interp/MemberPointer.h
@@ -0,0 +1,112 @@
+//===------------------------- MemberPointer.h ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_INTERP_MEMBER_POINTER_H
+#define LLVM_CLANG_AST_INTERP_MEMBER_POINTER_H
+
+#include "Pointer.h"
+#include <optional>
+
+namespace clang {
+class ASTContext;
+namespace interp {
+
+class Context;
+class FunctionPointer;
+
+class MemberPointer final {
+private:
+ Pointer Base;
+ const Decl *Dcl = nullptr;
+ int32_t PtrOffset = 0;
+
+ MemberPointer(Pointer Base, const Decl *Dcl, int32_t PtrOffset)
+ : Base(Base), Dcl(Dcl), PtrOffset(PtrOffset) {}
+
+public:
+ MemberPointer() = default;
+ MemberPointer(Pointer Base, const Decl *Dcl) : Base(Base), Dcl(Dcl) {}
+ MemberPointer(uint32_t Address, const Descriptor *D) {
+ // We only reach this for Address == 0, when creating a null member pointer.
+ assert(Address == 0);
+ }
+
+ MemberPointer(const Decl *D) : Dcl(D) {
+ assert((isa<FieldDecl, IndirectFieldDecl, CXXMethodDecl>(D)));
+ }
+
+ uint64_t getIntegerRepresentation() const {
+ assert(
+ false &&
+ "getIntegerRepresentation() shouldn't be reachable for MemberPointers");
+ return 17;
+ }
+
+ std::optional<Pointer> toPointer(const Context &Ctx) const;
+
+ FunctionPointer toFunctionPointer(const Context &Ctx) const;
+
+ Pointer getBase() const {
+ if (PtrOffset < 0)
+ return Base.atField(-PtrOffset);
+ return Base.atFieldSub(PtrOffset);
+ }
+ bool isMemberFunctionPointer() const {
+ return isa_and_nonnull<CXXMethodDecl>(Dcl);
+ }
+ const CXXMethodDecl *getMemberFunction() const {
+ return dyn_cast_if_present<CXXMethodDecl>(Dcl);
+ }
+ const FieldDecl *getField() const {
+ return dyn_cast_if_present<FieldDecl>(Dcl);
+ }
+
+ bool hasDecl() const { return Dcl; }
+ const Decl *getDecl() const { return Dcl; }
+
+ MemberPointer atInstanceBase(unsigned Offset) const {
+ if (Base.isZero())
+ return MemberPointer(Base, Dcl, Offset);
+ return MemberPointer(this->Base, Dcl, Offset + PtrOffset);
+ }
+
+ MemberPointer takeInstance(Pointer Instance) const {
+ assert(this->Base.isZero());
+ return MemberPointer(Instance, this->Dcl, this->PtrOffset);
+ }
+
+ APValue toAPValue() const;
+
+ bool isZero() const { return Base.isZero() && !Dcl; }
+ bool hasBase() const { return !Base.isZero(); }
+
+ void print(llvm::raw_ostream &OS) const {
+ OS << "MemberPtr(" << Base << " " << (void *)Dcl << " + " << PtrOffset
+ << ")";
+ }
+
+ std::string toDiagnosticString(const ASTContext &Ctx) const {
+ return "FIXME";
+ }
+
+ ComparisonCategoryResult compare(const MemberPointer &RHS) const {
+ if (this->Dcl == RHS.Dcl)
+ return ComparisonCategoryResult::Equal;
+ return ComparisonCategoryResult::Unordered;
+ }
+};
+
+inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, MemberPointer FP) {
+ FP.print(OS);
+ return OS;
+}
+
+} // namespace interp
+} // namespace clang
+
+#endif
diff --git a/clang/lib/AST/Interp/Opcodes.td b/clang/lib/AST/Interp/Opcodes.td
index cfbd7f93..cb4f299 100644
--- a/clang/lib/AST/Interp/Opcodes.td
+++ b/clang/lib/AST/Interp/Opcodes.td
@@ -30,6 +30,7 @@ def IntAPS : Type;
def Float : Type;
def Ptr : Type;
def FnPtr : Type;
+def MemberPtr : Type;
//===----------------------------------------------------------------------===//
// Types transferred to the interpreter.
@@ -61,6 +62,7 @@ def ArgOffsetOfExpr : ArgType { let Name = "const OffsetOfExpr *"; }
def ArgDeclRef : ArgType { let Name = "const DeclRefExpr *"; }
def ArgDesc : ArgType { let Name = "const Descriptor *"; }
def ArgCCI : ArgType { let Name = "const ComparisonCategoryInfo *"; }
+def ArgDecl : ArgType { let Name = "const Decl*"; }
//===----------------------------------------------------------------------===//
// Classes of types instructions operate on.
@@ -93,7 +95,7 @@ def AluTypeClass : TypeClass {
}
def PtrTypeClass : TypeClass {
- let Types = [Ptr, FnPtr];
+ let Types = [Ptr, FnPtr, MemberPtr];
}
def BoolTypeClass : TypeClass {
@@ -208,7 +210,6 @@ def CallBI : Opcode {
def CallPtr : Opcode {
let Args = [ArgUint32, ArgCallExpr];
- let Types = [];
}
def CallVar : Opcode {
@@ -327,6 +328,11 @@ def GetPtrBasePop : Opcode {
// Offset of field, which is a base.
let Args = [ArgUint32];
}
+def GetMemberPtrBasePop : Opcode {
+ // Offset of field, which is a base.
+ let Args = [ArgSint32];
+}
+
def FinishInitPop : Opcode;
def FinishInit : Opcode;
@@ -751,6 +757,14 @@ def CheckNonNullArg : Opcode {
def Memcpy : Opcode;
+def ToMemberPtr : Opcode;
+def CastMemberPtrPtr : Opcode;
+def GetMemberPtr : Opcode {
+ let Args = [ArgDecl];
+}
+def GetMemberPtrBase : Opcode;
+def GetMemberPtrDecl : Opcode;
+
//===----------------------------------------------------------------------===//
// Debugging.
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/AST/Interp/Pointer.cpp b/clang/lib/AST/Interp/Pointer.cpp
index 252f7ea..a60b4d2 100644
--- a/clang/lib/AST/Interp/Pointer.cpp
+++ b/clang/lib/AST/Interp/Pointer.cpp
@@ -13,6 +13,7 @@
#include "Function.h"
#include "Integral.h"
#include "InterpBlock.h"
+#include "MemberPointer.h"
#include "PrimType.h"
#include "Record.h"
diff --git a/clang/lib/AST/Interp/Pointer.h b/clang/lib/AST/Interp/Pointer.h
index 93ca754..c6e4f4d 100644
--- a/clang/lib/AST/Interp/Pointer.h
+++ b/clang/lib/AST/Interp/Pointer.h
@@ -620,6 +620,7 @@ public:
private:
friend class Block;
friend class DeadBlock;
+ friend class MemberPointer;
friend struct InitMap;
Pointer(Block *Pointee, unsigned Base, uint64_t Offset);
diff --git a/clang/lib/AST/Interp/PrimType.cpp b/clang/lib/AST/Interp/PrimType.cpp
index 9b96dcf..3054e67 100644
--- a/clang/lib/AST/Interp/PrimType.cpp
+++ b/clang/lib/AST/Interp/PrimType.cpp
@@ -11,6 +11,7 @@
#include "Floating.h"
#include "FunctionPointer.h"
#include "IntegralAP.h"
+#include "MemberPointer.h"
#include "Pointer.h"
using namespace clang;
diff --git a/clang/lib/AST/Interp/PrimType.h b/clang/lib/AST/Interp/PrimType.h
index 604fb5d..20fb5e8 100644
--- a/clang/lib/AST/Interp/PrimType.h
+++ b/clang/lib/AST/Interp/PrimType.h
@@ -25,6 +25,7 @@ class Pointer;
class Boolean;
class Floating;
class FunctionPointer;
+class MemberPointer;
template <bool Signed> class IntegralAP;
template <unsigned Bits, bool Signed> class Integral;
@@ -44,10 +45,11 @@ enum PrimType : unsigned {
PT_Float = 11,
PT_Ptr = 12,
PT_FnPtr = 13,
+ PT_MemberPtr = 14,
};
inline constexpr bool isPtrType(PrimType T) {
- return T == PT_Ptr || T == PT_FnPtr;
+ return T == PT_Ptr || T == PT_FnPtr || T == PT_MemberPtr;
}
enum class CastKind : uint8_t {
@@ -91,6 +93,9 @@ template <> struct PrimConv<PT_Ptr> { using T = Pointer; };
template <> struct PrimConv<PT_FnPtr> {
using T = FunctionPointer;
};
+template <> struct PrimConv<PT_MemberPtr> {
+ using T = MemberPointer;
+};
/// Returns the size of a primitive type in bytes.
size_t primSize(PrimType Type);
@@ -131,6 +136,7 @@ static inline bool aligned(const void *P) {
TYPE_SWITCH_CASE(PT_Bool, B) \
TYPE_SWITCH_CASE(PT_Ptr, B) \
TYPE_SWITCH_CASE(PT_FnPtr, B) \
+ TYPE_SWITCH_CASE(PT_MemberPtr, B) \
} \
} while (0)
diff --git a/clang/lib/AST/OpenACCClause.cpp b/clang/lib/AST/OpenACCClause.cpp
index 403ce9a..95089a9 100644
--- a/clang/lib/AST/OpenACCClause.cpp
+++ b/clang/lib/AST/OpenACCClause.cpp
@@ -104,7 +104,7 @@ OpenACCClause::child_range OpenACCClause::children() {
#define VISIT_CLAUSE(CLAUSE_NAME) \
case OpenACCClauseKind::CLAUSE_NAME: \
return cast<OpenACC##CLAUSE_NAME##Clause>(this)->children();
-#define CLAUSE_ALIAS(ALIAS_NAME, CLAUSE_NAME) \
+#define CLAUSE_ALIAS(ALIAS_NAME, CLAUSE_NAME, DEPRECATED) \
case OpenACCClauseKind::ALIAS_NAME: \
return cast<OpenACC##CLAUSE_NAME##Clause>(this)->children();
diff --git a/clang/lib/AST/ParentMap.cpp b/clang/lib/AST/ParentMap.cpp
index 534793b..3d6a1cc 100644
--- a/clang/lib/AST/ParentMap.cpp
+++ b/clang/lib/AST/ParentMap.cpp
@@ -97,22 +97,6 @@ static void BuildParentMap(MapTy& M, Stmt* S,
BuildParentMap(M, SubStmt, OVMode);
}
break;
- case Stmt::CXXDefaultArgExprClass:
- if (auto *Arg = dyn_cast<CXXDefaultArgExpr>(S)) {
- if (Arg->hasRewrittenInit()) {
- M[Arg->getExpr()] = S;
- BuildParentMap(M, Arg->getExpr(), OVMode);
- }
- }
- break;
- case Stmt::CXXDefaultInitExprClass:
- if (auto *Init = dyn_cast<CXXDefaultInitExpr>(S)) {
- if (Init->hasRewrittenInit()) {
- M[Init->getExpr()] = S;
- BuildParentMap(M, Init->getExpr(), OVMode);
- }
- }
- break;
default:
for (Stmt *SubStmt : S->children()) {
if (SubStmt) {
diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp
index 8baccee..1076dcd 100644
--- a/clang/lib/AST/TextNodeDumper.cpp
+++ b/clang/lib/AST/TextNodeDumper.cpp
@@ -958,6 +958,9 @@ void TextNodeDumper::dumpTemplateArgument(const TemplateArgument &TA) {
}
OS << " '" << Str << "'";
+ if (!Context)
+ return;
+
if (TemplateArgument CanonTA = Context->getCanonicalTemplateArgument(TA);
!CanonTA.structurallyEquals(TA)) {
llvm::SmallString<128> CanonStr;
@@ -1139,15 +1142,17 @@ void TextNodeDumper::dumpTemplateName(TemplateName TN, StringRef Label) {
}
OS << " '" << Str << "'";
- if (TemplateName CanonTN = Context->getCanonicalTemplateName(TN);
- CanonTN != TN) {
- llvm::SmallString<128> CanonStr;
- {
- llvm::raw_svector_ostream SS(CanonStr);
- CanonTN.print(SS, PrintPolicy);
+ if (Context) {
+ if (TemplateName CanonTN = Context->getCanonicalTemplateName(TN);
+ CanonTN != TN) {
+ llvm::SmallString<128> CanonStr;
+ {
+ llvm::raw_svector_ostream SS(CanonStr);
+ CanonTN.print(SS, PrintPolicy);
+ }
+ if (CanonStr != Str)
+ OS << ":'" << CanonStr << "'";
}
- if (CanonStr != Str)
- OS << ":'" << CanonStr << "'";
}
}
dumpBareTemplateName(TN);
diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp
index 2097b29..33acae2 100644
--- a/clang/lib/AST/Type.cpp
+++ b/clang/lib/AST/Type.cpp
@@ -2749,6 +2749,43 @@ bool QualType::isTriviallyCopyableType(const ASTContext &Context) const {
/*IsCopyConstructible=*/false);
}
+// FIXME: each call will trigger a full computation, cache the result.
+bool QualType::isBitwiseCloneableType(const ASTContext &Context) const {
+ auto CanonicalType = getCanonicalType();
+ if (CanonicalType.hasNonTrivialObjCLifetime())
+ return false;
+ if (CanonicalType->isArrayType())
+ return Context.getBaseElementType(CanonicalType)
+ .isBitwiseCloneableType(Context);
+
+ if (CanonicalType->isIncompleteType())
+ return false;
+ const auto *RD = CanonicalType->getAsRecordDecl(); // struct/union/class
+ if (!RD)
+ return true;
+
+ // Never allow memcpy when we're adding poisoned padding bits to the struct.
+  // Accessing these poisoned bits will trigger false alarms on
+ // SanitizeAddressFieldPadding etc.
+ if (RD->mayInsertExtraPadding())
+ return false;
+
+ for (auto *const Field : RD->fields()) {
+ if (!Field->getType().isBitwiseCloneableType(Context))
+ return false;
+ }
+
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (auto Base : CXXRD->bases())
+ if (!Base.getType().isBitwiseCloneableType(Context))
+ return false;
+ for (auto VBase : CXXRD->vbases())
+ if (!VBase.getType().isBitwiseCloneableType(Context))
+ return false;
+ }
+ return true;
+}
+
bool QualType::isTriviallyCopyConstructibleType(
const ASTContext &Context) const {
return isTriviallyCopyableTypeImpl(*this, Context,
@@ -4444,7 +4481,6 @@ static CachedProperties computeCachedProperties(const Type *T) {
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
// Treat instantiation-dependent types as external.
- if (!T->isInstantiationDependentType()) T->dump();
assert(T->isInstantiationDependentType());
return CachedProperties(Linkage::External, false);
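
The mayInsertExtraPadding check is what ties the trait to the field-padding sanitizer: once ASan may poison inserted padding, a raw byte copy is no longer safe. A hedged sketch (assumptions: compiled with -fsanitize=address -fsanitize-address-field-padding=1, and the class qualifies for inserted padding, i.e. it is non-trivially-copyable with a non-trivial destructor and non-standard layout, along the lines of the accompanying builtin-is-bitwise-cloneable-fsanitize.cpp test):

.. code-block:: c++

  class Padded {  // a candidate for inserted poisoned field padding
  public:
    virtual ~Padded();
  private:
    int x, y;
  };

  // Under the sanitizer flags assumed above, poisoned padding may be added
  // to Padded, so the trait conservatively refuses the byte copy:
  static_assert(!__is_bitwise_cloneable(Padded), "");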
diff --git a/clang/lib/Analysis/CFG.cpp b/clang/lib/Analysis/CFG.cpp
index 0231725..64e6155 100644
--- a/clang/lib/Analysis/CFG.cpp
+++ b/clang/lib/Analysis/CFG.cpp
@@ -556,10 +556,6 @@ public:
private:
// Visitors to walk an AST and construct the CFG.
- CFGBlock *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *Default,
- AddStmtChoice asc);
- CFGBlock *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *Default,
- AddStmtChoice asc);
CFGBlock *VisitInitListExpr(InitListExpr *ILE, AddStmtChoice asc);
CFGBlock *VisitAddrLabelExpr(AddrLabelExpr *A, AddStmtChoice asc);
CFGBlock *VisitAttributedStmt(AttributedStmt *A, AddStmtChoice asc);
@@ -2258,10 +2254,16 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc,
asc, ExternallyDestructed);
case Stmt::CXXDefaultArgExprClass:
- return VisitCXXDefaultArgExpr(cast<CXXDefaultArgExpr>(S), asc);
-
case Stmt::CXXDefaultInitExprClass:
- return VisitCXXDefaultInitExpr(cast<CXXDefaultInitExpr>(S), asc);
+ // FIXME: The expression inside a CXXDefaultArgExpr is owned by the
+ // called function's declaration, not by the caller. If we simply add
+ // this expression to the CFG, we could end up with the same Expr
+ // appearing multiple times (PR13385).
+ //
+ // It's likewise possible for multiple CXXDefaultInitExprs for the same
+ // expression to be used in the same function (through aggregate
+ // initialization).
+ return VisitStmt(S, asc);
case Stmt::CXXBindTemporaryExprClass:
return VisitCXXBindTemporaryExpr(cast<CXXBindTemporaryExpr>(S), asc);
@@ -2431,40 +2433,6 @@ CFGBlock *CFGBuilder::VisitChildren(Stmt *S) {
return B;
}
-CFGBlock *CFGBuilder::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *Arg,
- AddStmtChoice asc) {
- if (Arg->hasRewrittenInit()) {
- if (asc.alwaysAdd(*this, Arg)) {
- autoCreateBlock();
- appendStmt(Block, Arg);
- }
- return VisitStmt(Arg->getExpr(), asc);
- }
-
- // We can't add the default argument if it's not rewritten because the
- // expression inside a CXXDefaultArgExpr is owned by the called function's
- // declaration, not by the caller, we could end up with the same expression
- // appearing multiple times.
- return VisitStmt(Arg, asc);
-}
-
-CFGBlock *CFGBuilder::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *Init,
- AddStmtChoice asc) {
- if (Init->hasRewrittenInit()) {
- if (asc.alwaysAdd(*this, Init)) {
- autoCreateBlock();
- appendStmt(Block, Init);
- }
- return VisitStmt(Init->getExpr(), asc);
- }
-
- // We can't add the default initializer if it's not rewritten because multiple
- // CXXDefaultInitExprs for the same sub-expression to be used in the same
- // function (through aggregate initialization). we could end up with the same
- // expression appearing multiple times.
- return VisitStmt(Init, asc);
-}
-
CFGBlock *CFGBuilder::VisitInitListExpr(InitListExpr *ILE, AddStmtChoice asc) {
if (asc.alwaysAdd(*this, ILE)) {
autoCreateBlock();
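
The FIXME above is about AST node sharing: a default-argument expression is owned by the callee's declaration, so every call that omits the argument refers to the very same Expr node, which therefore must not be added to the caller's CFG directly. A minimal sketch (my example; PR13385 is the historical report):

.. code-block:: c++

  int twice(int x = 1 + 1);  // "1 + 1" is owned by this declaration

  void callers() {
    twice();  // both CXXDefaultArgExprs here wrap the
    twice();  // same underlying BinaryOperator node
  }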
diff --git a/clang/lib/Basic/Cuda.cpp b/clang/lib/Basic/Cuda.cpp
index e2609b9..1d96a92 100644
--- a/clang/lib/Basic/Cuda.cpp
+++ b/clang/lib/Basic/Cuda.cpp
@@ -144,6 +144,7 @@ static const CudaArchToStringMap arch_names[] = {
GFX(1103), // gfx1103
GFX(1150), // gfx1150
GFX(1151), // gfx1151
+ GFX(1152), // gfx1152
{CudaArch::GFX12_GENERIC, "gfx12-generic", "compute_amdgcn"},
GFX(1200), // gfx1200
GFX(1201), // gfx1201
diff --git a/clang/lib/Basic/Targets/LoongArch.h b/clang/lib/Basic/Targets/LoongArch.h
index 6857284..5fc2234 100644
--- a/clang/lib/Basic/Targets/LoongArch.h
+++ b/clang/lib/Basic/Targets/LoongArch.h
@@ -133,7 +133,7 @@ public:
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
IntMaxType = Int64Type = SignedLong;
HasUnalignedAccess = true;
- resetDataLayout("e-m:e-p:64:64-i64:64-i128:128-n64-S128");
+ resetDataLayout("e-m:e-p:64:64-i64:64-i128:128-n32:64-S128");
// TODO: select appropriate ABI.
setABI("lp64d");
}
diff --git a/clang/lib/Basic/Targets/NVPTX.cpp b/clang/lib/Basic/Targets/NVPTX.cpp
index fc6ef11..ff7d2f1 100644
--- a/clang/lib/Basic/Targets/NVPTX.cpp
+++ b/clang/lib/Basic/Targets/NVPTX.cpp
@@ -228,6 +228,7 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
case CudaArch::GFX1103:
case CudaArch::GFX1150:
case CudaArch::GFX1151:
+ case CudaArch::GFX1152:
case CudaArch::GFX12_GENERIC:
case CudaArch::GFX1200:
case CudaArch::GFX1201:
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 191bd75..6e9a1ba 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -3537,6 +3537,7 @@ void CGOpenMPRuntimeGPU::processRequiresDirective(
case CudaArch::GFX1103:
case CudaArch::GFX1150:
case CudaArch::GFX1151:
+ case CudaArch::GFX1152:
case CudaArch::GFX12_GENERIC:
case CudaArch::GFX1200:
case CudaArch::GFX1201:
diff --git a/clang/lib/CodeGen/Targets/AMDGPU.cpp b/clang/lib/CodeGen/Targets/AMDGPU.cpp
index d1ff8b4..057f6ef 100644
--- a/clang/lib/CodeGen/Targets/AMDGPU.cpp
+++ b/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -120,7 +120,11 @@ void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const {
- llvm_unreachable("AMDGPU does not support varargs");
+ const bool IsIndirect = false;
+ const bool AllowHigherAlign = false;
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
+ getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(4), AllowHigherAlign);
}
ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
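
With EmitVAArg now routed through emitVoidPtrVAArg (direct, 4-byte slots, no over-alignment), plain variadic functions can be lowered for AMDGPU instead of hitting the old unreachable. A small sketch of the kind of code this unblocks (my example):

.. code-block:: c++

  #include <cstdarg>

  extern "C" int sum(int n, ...) {
    va_list ap;
    va_start(ap, n);
    int total = 0;
    for (int i = 0; i < n; ++i)
      total += va_arg(ap, int);  // each read advances the void*-based va_list
    va_end(ap);
    return total;
  }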
diff --git a/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp b/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
index 08e711ca..6e56ee5 100644
--- a/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
+++ b/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
@@ -31,7 +31,6 @@
using namespace clang;
using namespace clang::extractapi;
using namespace llvm;
-using namespace llvm::json;
namespace {
@@ -1036,9 +1035,9 @@ void SymbolGraphSerializer::serializeGraphToStream(
ExtendedModule &&EM) {
Object Root = serializeGraph(ModuleName, std::move(EM));
if (Options.Compact)
- OS << formatv("{0}", Value(std::move(Root))) << "\n";
+ OS << formatv("{0}", json::Value(std::move(Root))) << "\n";
else
- OS << formatv("{0:2}", Value(std::move(Root))) << "\n";
+ OS << formatv("{0:2}", json::Value(std::move(Root))) << "\n";
}
void SymbolGraphSerializer::serializeMainSymbolGraph(
diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp
index d6061c2..eb96b54 100644
--- a/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/clang/lib/Format/UnwrappedLineParser.cpp
@@ -1181,10 +1181,10 @@ void UnwrappedLineParser::parsePPDefine() {
Line->InMacroBody = true;
if (Style.SkipMacroDefinitionBody) {
- do {
+ while (!eof()) {
FormatTok->Finalized = true;
- nextToken();
- } while (!eof());
+ FormatTok = Tokens->getNextToken();
+ }
addUnwrappedLine();
return;
}
diff --git a/clang/lib/Frontend/FrontendActions.cpp b/clang/lib/Frontend/FrontendActions.cpp
index 1812b85..4f06432 100644
--- a/clang/lib/Frontend/FrontendActions.cpp
+++ b/clang/lib/Frontend/FrontendActions.cpp
@@ -1169,8 +1169,8 @@ void PrintDependencyDirectivesSourceMinimizerAction::ExecuteAction() {
llvm::SmallVector<dependency_directives_scan::Token, 16> Tokens;
llvm::SmallVector<dependency_directives_scan::Directive, 32> Directives;
if (scanSourceForDependencyDirectives(
- FromFile.getBuffer(), Tokens, Directives, CI.getLangOpts(),
- &CI.getDiagnostics(), SM.getLocForStartOfFile(SM.getMainFileID()))) {
+ FromFile.getBuffer(), Tokens, Directives, &CI.getDiagnostics(),
+ SM.getLocForStartOfFile(SM.getMainFileID()))) {
assert(CI.getDiagnostics().hasErrorOccurred() &&
"no errors reported for failure");
diff --git a/clang/lib/Interpreter/IncrementalParser.cpp b/clang/lib/Interpreter/IncrementalParser.cpp
index 5bc8385..a8d0294 100644
--- a/clang/lib/Interpreter/IncrementalParser.cpp
+++ b/clang/lib/Interpreter/IncrementalParser.cpp
@@ -413,7 +413,8 @@ void IncrementalParser::CleanUpPTU(PartialTranslationUnit &PTU) {
if (!ND)
continue;
// Check if we need to clean up the IdResolver chain.
- if (ND->getDeclName().getFETokenInfo())
+ if (ND->getDeclName().getFETokenInfo() && !D->getLangOpts().ObjC &&
+ !D->getLangOpts().CPlusPlus)
getCI()->getSema().IdResolver.RemoveDecl(ND);
}
}
diff --git a/clang/lib/Interpreter/Interpreter.cpp b/clang/lib/Interpreter/Interpreter.cpp
index 683f87e..7a95278 100644
--- a/clang/lib/Interpreter/Interpreter.cpp
+++ b/clang/lib/Interpreter/Interpreter.cpp
@@ -42,6 +42,9 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Host.h"
+
+#include <cstdarg>
+
using namespace clang;
// FIXME: Figure out how to unify with namespace init_convenience from
@@ -270,14 +273,10 @@ Interpreter::~Interpreter() {
// can't find the precise resource directory in unittests so we have to hard
// code them.
const char *const Runtimes = R"(
+ #define __CLANG_REPL__ 1
#ifdef __cplusplus
+ #define EXTERN_C extern "C"
void *__clang_Interpreter_SetValueWithAlloc(void*, void*, void*);
- void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*);
- void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*, void*);
- void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*, float);
- void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*, double);
- void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*, long double);
- void __clang_Interpreter_SetValueNoAlloc(void*,void*,void*,unsigned long long);
struct __clang_Interpreter_NewTag{} __ci_newtag;
void* operator new(__SIZE_TYPE__, void* __p, __clang_Interpreter_NewTag) noexcept;
template <class T, class = T (*)() /*disable for arrays*/>
@@ -289,7 +288,11 @@ const char *const Runtimes = R"(
void __clang_Interpreter_SetValueCopyArr(const T (*Src)[N], void* Placement, unsigned long Size) {
__clang_Interpreter_SetValueCopyArr(Src[0], Placement, Size);
}
+#else
+ #define EXTERN_C extern
#endif // __cplusplus
+
+ EXTERN_C void __clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType, ...);
)";
llvm::Expected<std::unique_ptr<Interpreter>>
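
The rewritten runtime header collapses the old C++-only overload set into a single variadic entry point guarded by an EXTERN_C macro, so the same declaration parses in both C and C++ REPL sessions and always resolves to one unmangled symbol the JIT can look up. The pattern in isolation (the call in the comment is illustrative of what the REPL synthesizes):

#ifdef __cplusplus
#define EXTERN_C extern "C"
#else
#define EXTERN_C extern
#endif

// One unmangled symbol usable from either language mode; the value travels
// through the varargs, e.g. __clang_Interpreter_SetValueNoAlloc(I, V, Ty, x).
EXTERN_C void __clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal,
                                                  void *OpaqueType, ...);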
@@ -588,15 +591,17 @@ std::unique_ptr<RuntimeInterfaceBuilder> Interpreter::FindRuntimeInterface() {
if (!LookupInterface(ValuePrintingInfo[NoAlloc],
MagicRuntimeInterface[NoAlloc]))
return nullptr;
- if (!LookupInterface(ValuePrintingInfo[WithAlloc],
- MagicRuntimeInterface[WithAlloc]))
- return nullptr;
- if (!LookupInterface(ValuePrintingInfo[CopyArray],
- MagicRuntimeInterface[CopyArray]))
- return nullptr;
- if (!LookupInterface(ValuePrintingInfo[NewTag],
- MagicRuntimeInterface[NewTag]))
- return nullptr;
+ if (Ctx.getLangOpts().CPlusPlus) {
+ if (!LookupInterface(ValuePrintingInfo[WithAlloc],
+ MagicRuntimeInterface[WithAlloc]))
+ return nullptr;
+ if (!LookupInterface(ValuePrintingInfo[CopyArray],
+ MagicRuntimeInterface[CopyArray]))
+ return nullptr;
+ if (!LookupInterface(ValuePrintingInfo[NewTag],
+ MagicRuntimeInterface[NewTag]))
+ return nullptr;
+ }
return createInProcessRuntimeInterfaceBuilder(*this, Ctx, S);
}
@@ -855,69 +860,81 @@ __clang_Interpreter_SetValueWithAlloc(void *This, void *OutVal,
return VRef.getPtr();
}
-// Pointers, lvalue struct that can take as a reference.
-REPL_EXTERNAL_VISIBILITY void
-__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
- void *Val) {
+extern "C" void REPL_EXTERNAL_VISIBILITY __clang_Interpreter_SetValueNoAlloc(
+ void *This, void *OutVal, void *OpaqueType, ...) {
Value &VRef = *(Value *)OutVal;
- VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
- VRef.setPtr(Val);
-}
+ Interpreter *I = static_cast<Interpreter *>(This);
+ VRef = Value(I, OpaqueType);
+ if (VRef.isVoid())
+ return;
-REPL_EXTERNAL_VISIBILITY void
-__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal,
- void *OpaqueType) {
- Value &VRef = *(Value *)OutVal;
- VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
-}
+ va_list args;
+ va_start(args, /*last named param*/ OpaqueType);
-static void SetValueDataBasedOnQualType(Value &V, unsigned long long Data) {
- QualType QT = V.getType();
- if (const auto *ET = QT->getAs<EnumType>())
- QT = ET->getDecl()->getIntegerType();
-
- switch (QT->castAs<BuiltinType>()->getKind()) {
- default:
- llvm_unreachable("unknown type kind!");
-#define X(type, name) \
- case BuiltinType::name: \
- V.set##name(Data); \
- break;
- REPL_BUILTIN_TYPES
-#undef X
+ QualType QT = VRef.getType();
+ if (VRef.getKind() == Value::K_PtrOrObj) {
+ VRef.setPtr(va_arg(args, void *));
+ } else {
+ if (const auto *ET = QT->getAs<EnumType>())
+ QT = ET->getDecl()->getIntegerType();
+ switch (QT->castAs<BuiltinType>()->getKind()) {
+ default:
+ llvm_unreachable("unknown type kind!");
+ break;
+ // Types narrower than int are promoted to int by the default argument
+ // promotions, so read them back as int; otherwise va_arg has UB.
+ case BuiltinType::Bool:
+ VRef.setBool(va_arg(args, int));
+ break;
+ case BuiltinType::Char_S:
+ VRef.setChar_S(va_arg(args, int));
+ break;
+ case BuiltinType::SChar:
+ VRef.setSChar(va_arg(args, int));
+ break;
+ case BuiltinType::Char_U:
+ VRef.setChar_U(va_arg(args, unsigned));
+ break;
+ case BuiltinType::UChar:
+ VRef.setUChar(va_arg(args, unsigned));
+ break;
+ case BuiltinType::Short:
+ VRef.setShort(va_arg(args, int));
+ break;
+ case BuiltinType::UShort:
+ VRef.setUShort(va_arg(args, unsigned));
+ break;
+ case BuiltinType::Int:
+ VRef.setInt(va_arg(args, int));
+ break;
+ case BuiltinType::UInt:
+ VRef.setUInt(va_arg(args, unsigned));
+ break;
+ case BuiltinType::Long:
+ VRef.setLong(va_arg(args, long));
+ break;
+ case BuiltinType::ULong:
+ VRef.setULong(va_arg(args, unsigned long));
+ break;
+ case BuiltinType::LongLong:
+ VRef.setLongLong(va_arg(args, long long));
+ break;
+ case BuiltinType::ULongLong:
+ VRef.setULongLong(va_arg(args, unsigned long long));
+ break;
+ // float is promoted to double by the default argument promotions, so read
+ // it back as double; otherwise va_arg has UB.
+ case BuiltinType::Float:
+ VRef.setFloat(va_arg(args, double));
+ break;
+ case BuiltinType::Double:
+ VRef.setDouble(va_arg(args, double));
+ break;
+ case BuiltinType::LongDouble:
+ VRef.setLongDouble(va_arg(args, long double));
+ break;
+ // See REPL_BUILTIN_TYPES.
+ }
}
-}
-
-REPL_EXTERNAL_VISIBILITY void
-__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
- unsigned long long Val) {
- Value &VRef = *(Value *)OutVal;
- VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
- SetValueDataBasedOnQualType(VRef, Val);
-}
-
-REPL_EXTERNAL_VISIBILITY void
-__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
- float Val) {
- Value &VRef = *(Value *)OutVal;
- VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
- VRef.setFloat(Val);
-}
-
-REPL_EXTERNAL_VISIBILITY void
-__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
- double Val) {
- Value &VRef = *(Value *)OutVal;
- VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
- VRef.setDouble(Val);
-}
-
-REPL_EXTERNAL_VISIBILITY void
-__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
- long double Val) {
- Value &VRef = *(Value *)OutVal;
- VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
- VRef.setLongDouble(Val);
+ va_end(args);
}
// A trampoline to work around the fact that operator placement new cannot
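
The switch above leans on C's default argument promotions: anything narrower than int arrives as int, and float arrives as double, so va_arg must name the promoted type. A minimal self-contained sketch of that rule (names are illustrative):

#include <cstdarg>
#include <cstdio>

// Variadic calls promote bool/char/short to int and float to double, so the
// callee reads the promoted type and narrows afterwards.
void readPromoted(int Count, ...) {
  va_list Args;
  va_start(Args, Count);
  bool B = static_cast<bool>(va_arg(Args, int));      // never va_arg(..., bool)
  float F = static_cast<float>(va_arg(Args, double)); // never va_arg(..., float)
  std::printf("%d %g\n", B, F);
  va_end(Args);
}
// e.g. readPromoted(2, true, 3.5f);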
diff --git a/clang/lib/Lex/DependencyDirectivesScanner.cpp b/clang/lib/Lex/DependencyDirectivesScanner.cpp
index fda54d3..0971daa 100644
--- a/clang/lib/Lex/DependencyDirectivesScanner.cpp
+++ b/clang/lib/Lex/DependencyDirectivesScanner.cpp
@@ -62,17 +62,14 @@ struct DirectiveWithTokens {
struct Scanner {
Scanner(StringRef Input,
SmallVectorImpl<dependency_directives_scan::Token> &Tokens,
- DiagnosticsEngine *Diags, SourceLocation InputSourceLoc,
- const LangOptions &LangOpts)
+ DiagnosticsEngine *Diags, SourceLocation InputSourceLoc)
: Input(Input), Tokens(Tokens), Diags(Diags),
- InputSourceLoc(InputSourceLoc),
- LangOpts(getLangOptsForDepScanning(LangOpts)),
- TheLexer(InputSourceLoc, this->LangOpts, Input.begin(), Input.begin(),
+ InputSourceLoc(InputSourceLoc), LangOpts(getLangOptsForDepScanning()),
+ TheLexer(InputSourceLoc, LangOpts, Input.begin(), Input.begin(),
Input.end()) {}
- static LangOptions
- getLangOptsForDepScanning(const LangOptions &invocationLangOpts) {
- LangOptions LangOpts(invocationLangOpts);
+ static LangOptions getLangOptsForDepScanning() {
+ LangOptions LangOpts;
// Set the lexer to use 'tok::at' for '@', instead of 'tok::unknown'.
LangOpts.ObjC = true;
LangOpts.LineComment = true;
@@ -703,7 +700,7 @@ bool Scanner::lex_Pragma(const char *&First, const char *const End) {
SmallVector<dependency_directives_scan::Token> DiscardTokens;
const char *Begin = Buffer.c_str();
Scanner PragmaScanner{StringRef(Begin, Buffer.size()), DiscardTokens, Diags,
- InputSourceLoc, LangOptions()};
+ InputSourceLoc};
PragmaScanner.TheLexer.setParsingPreprocessorDirective(true);
if (PragmaScanner.lexPragma(Begin, Buffer.end()))
@@ -953,10 +950,9 @@ bool Scanner::scan(SmallVectorImpl<Directive> &Directives) {
bool clang::scanSourceForDependencyDirectives(
StringRef Input, SmallVectorImpl<dependency_directives_scan::Token> &Tokens,
- SmallVectorImpl<Directive> &Directives, const LangOptions &LangOpts,
- DiagnosticsEngine *Diags, SourceLocation InputSourceLoc) {
- return Scanner(Input, Tokens, Diags, InputSourceLoc, LangOpts)
- .scan(Directives);
+ SmallVectorImpl<Directive> &Directives, DiagnosticsEngine *Diags,
+ SourceLocation InputSourceLoc) {
+ return Scanner(Input, Tokens, Diags, InputSourceLoc).scan(Directives);
}
void clang::printDependencyDirectivesAsSource(
diff --git a/clang/lib/Parse/ParseStmt.cpp b/clang/lib/Parse/ParseStmt.cpp
index c252032..16a5b74 100644
--- a/clang/lib/Parse/ParseStmt.cpp
+++ b/clang/lib/Parse/ParseStmt.cpp
@@ -571,11 +571,8 @@ StmtResult Parser::ParseExprStatement(ParsedStmtContext StmtCtx) {
}
Token *CurTok = nullptr;
- // If the semicolon is missing at the end of REPL input, consider if
- // we want to do value printing. Note this is only enabled in C++ mode
- // since part of the implementation requires C++ language features.
// Note we shouldn't eat the token since the callback needs it.
- if (Tok.is(tok::annot_repl_input_end) && Actions.getLangOpts().CPlusPlus)
+ if (Tok.is(tok::annot_repl_input_end))
CurTok = &Tok;
else
// Otherwise, eat the semicolon.
diff --git a/clang/lib/Sema/Scope.cpp b/clang/lib/Sema/Scope.cpp
index c08073e..5bc7e79 100644
--- a/clang/lib/Sema/Scope.cpp
+++ b/clang/lib/Sema/Scope.cpp
@@ -228,7 +228,11 @@ void Scope::dumpImpl(raw_ostream &OS) const {
{CompoundStmtScope, "CompoundStmtScope"},
{ClassInheritanceScope, "ClassInheritanceScope"},
{CatchScope, "CatchScope"},
+ {ConditionVarScope, "ConditionVarScope"},
+ {OpenMPOrderClauseScope, "OpenMPOrderClauseScope"},
+ {LambdaScope, "LambdaScope"},
{OpenACCComputeConstructScope, "OpenACCComputeConstructScope"},
+ {TypeAliasScope, "TypeAliasScope"},
{FriendScope, "FriendScope"},
};
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index c446cc1..d11bc9e 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -31,9 +31,9 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
constexpr const int SizeIdx = 2;
llvm::APSInt Size;
Expr *ArgExpr = TheCall->getArg(SizeIdx);
- ExprResult R = SemaRef.VerifyIntegerConstantExpression(ArgExpr, &Size);
- if (R.isInvalid())
- return true;
+ [[maybe_unused]] ExprResult R =
+ SemaRef.VerifyIntegerConstantExpression(ArgExpr, &Size);
+ assert(!R.isInvalid());
switch (Size.getSExtValue()) {
case 1:
case 2:
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index a6734ef..4b9b735 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -2288,7 +2288,8 @@ void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
// Partial translation units that are created in incremental processing must
// not clean up the IdResolver because PTUs should take into account the
// declarations that came from previous PTUs.
- if (!PP.isIncrementalProcessingEnabled() || getLangOpts().ObjC)
+ if (!PP.isIncrementalProcessingEnabled() || getLangOpts().ObjC ||
+ getLangOpts().CPlusPlus)
IdResolver.RemoveDecl(D);
// Warn on it if we are shadowing a declaration.
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index fb5ca19..76145f2 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -5572,9 +5572,10 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
Res = Immediate.TransformInitializer(Param->getInit(),
/*NotCopy=*/false);
});
- if (Res.isUsable())
- Res = ConvertParamDefaultArgument(Param, Res.get(),
- Res.get()->getBeginLoc());
+ if (Res.isInvalid())
+ return ExprError();
+ Res = ConvertParamDefaultArgument(Param, Res.get(),
+ Res.get()->getBeginLoc());
if (Res.isInvalid())
return ExprError();
Init = Res.get();
@@ -5608,10 +5609,9 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
InitializationContext.emplace(Loc, Field, CurContext);
Expr *Init = nullptr;
- bool HasRewrittenInit = false;
bool NestedDefaultChecking = isCheckingDefaultArgumentOrInitializer();
- bool InLifetimeExtendingContext = isInLifetimeExtendingContext();
+
EnterExpressionEvaluationContext EvalContext(
*this, ExpressionEvaluationContext::PotentiallyEvaluated, Field);
@@ -5646,36 +5646,19 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
ImmediateCallVisitor V(getASTContext());
if (!NestedDefaultChecking)
V.TraverseDecl(Field);
-
- // CWG1815
- // Support lifetime extension of temporary created by aggregate
- // initialization using a default member initializer. We should always rebuild
- // the initializer if it contains any temporaries (if the initializer
- // expression is an ExprWithCleanups). Then make sure the normal lifetime
- // extension code recurses into the default initializer and does lifetime
- // extension when warranted.
- bool ContainsAnyTemporaries =
- isa_and_present<ExprWithCleanups>(Field->getInClassInitializer());
- if (V.HasImmediateCalls || InLifetimeExtendingContext ||
- ContainsAnyTemporaries) {
- HasRewrittenInit = true;
+ if (V.HasImmediateCalls) {
ExprEvalContexts.back().DelayedDefaultInitializationContext = {Loc, Field,
CurContext};
ExprEvalContexts.back().IsCurrentlyCheckingDefaultArgumentOrInitializer =
NestedDefaultChecking;
- // Pass down lifetime extending flag, and collect temporaries in
- // CreateMaterializeTemporaryExpr when we rewrite the call argument.
- keepInLifetimeExtendingContext();
+
EnsureImmediateInvocationInDefaultArgs Immediate(*this);
ExprResult Res;
-
- // Rebuild CXXDefaultInitExpr might cause diagnostics.
- SFINAETrap Trap(*this);
runWithSufficientStackSpace(Loc, [&] {
Res = Immediate.TransformInitializer(Field->getInClassInitializer(),
/*CXXDirectInit=*/false);
});
- if (Res.isUsable())
+ if (!Res.isInvalid())
Res = ConvertMemberDefaultInitExpression(Field, Res.get(), Loc);
if (Res.isInvalid()) {
Field->setInvalidDecl();
@@ -5702,7 +5685,7 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
return CXXDefaultInitExpr::Create(Context, InitializationContext->Loc,
Field, InitializationContext->Context,
- HasRewrittenInit ? Init : nullptr);
+ Init);
}
// DR1351:
diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp
index 4487c61..cf461a6 100644
--- a/clang/lib/Sema/SemaExprCXX.cpp
+++ b/clang/lib/Sema/SemaExprCXX.cpp
@@ -1555,6 +1555,9 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
bool ListInitialization) {
QualType Ty = TInfo->getType();
SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
+
+ assert((!ListInitialization || Exprs.size() == 1) &&
+ "List initialization must have exactly one expression.");
SourceRange FullRange = SourceRange(TyBeginLoc, RParenOrBraceLoc);
InitializedEntity Entity =
@@ -5126,6 +5129,7 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
case UTT_IsStandardLayout:
case UTT_IsPOD:
case UTT_IsLiteral:
+ case UTT_IsBitwiseCloneable:
// By analogy, is_trivially_relocatable and is_trivially_equality_comparable
// impose the same constraints.
case UTT_IsTriviallyRelocatable:
@@ -5619,6 +5623,8 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
return C.hasUniqueObjectRepresentations(T);
case UTT_IsTriviallyRelocatable:
return T.isTriviallyRelocatableType(C);
+ case UTT_IsBitwiseCloneable:
+ return T.isBitwiseCloneableType(C);
case UTT_IsReferenceable:
return T.isReferenceable();
case UTT_CanPassInRegs:
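
A usage sketch for the new trait; the `__is_bitwise_cloneable` spelling is assumed from the UTT_IsBitwiseCloneable token (see the TokenKinds.def change in this patch):

struct Trivial { int X; };
// Both hold for trivially copyable types; the trait's point is to also cover
// types whose object representation can be safely duplicated bytewise.
static_assert(__is_bitwise_cloneable(Trivial), "");
static_assert(__is_bitwise_cloneable(int[4]), "");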
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index 9ed3e8a..ed8b226 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -8063,6 +8063,11 @@ static void visitLocalsRetainedByInitializer(IndirectLocalPath &Path,
enum PathLifetimeKind {
/// Lifetime-extend along this path.
Extend,
+ /// We should lifetime-extend, but cannot due to technical limitations.
+ /// This happens for default member initializers,
+ /// which we don't clone for every use, so we don't have a unique
+ /// MaterializeTemporaryExpr to update.
+ ShouldExtend,
/// Do not lifetime extend along this path.
NoExtend
};
@@ -8074,7 +8079,7 @@ shouldLifetimeExtendThroughPath(const IndirectLocalPath &Path) {
PathLifetimeKind Kind = PathLifetimeKind::Extend;
for (auto Elem : Path) {
if (Elem.Kind == IndirectLocalPathEntry::DefaultInit)
- Kind = PathLifetimeKind::Extend;
+ Kind = PathLifetimeKind::ShouldExtend;
else if (Elem.Kind != IndirectLocalPathEntry::LambdaCaptureInit)
return PathLifetimeKind::NoExtend;
}
@@ -8194,6 +8199,18 @@ void Sema::checkInitializerLifetime(const InitializedEntity &Entity,
ExtendingEntity->allocateManglingNumber());
// Also visit the temporaries lifetime-extended by this initializer.
return true;
+
+ case PathLifetimeKind::ShouldExtend:
+ // We're supposed to lifetime-extend the temporary along this path (per
+ // the resolution of DR1815), but we don't support that yet.
+ //
+ // FIXME: Properly handle this situation. Perhaps the easiest approach
+ // would be to clone the initializer expression on each use that would
+ // lifetime extend its temporaries.
+ Diag(DiagLoc, diag::warn_unsupported_lifetime_extension)
+ << RK << DiagRange;
+ break;
+
case PathLifetimeKind::NoExtend:
// If the path goes through the initialization of a variable or field,
// it can't possibly reach a temporary created in this full-expression.
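
A sketch of the case the new ShouldExtend path diagnoses instead of silently extending (per the comments above; the type is illustrative):

struct A {
  const int &r = 0; // default member initializer materializes a temporary
};
// Aggregate initialization goes through the DMI; per CWG1815 the temporary
// should live as long as 'a', but this path only emits
// warn_unsupported_lifetime_extension, and 'a.r' is left dangling.
A a = {};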
diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp
index cdb60d4..97586a0 100644
--- a/clang/lib/Sema/SemaOpenACC.cpp
+++ b/clang/lib/Sema/SemaOpenACC.cpp
@@ -424,615 +424,736 @@ bool checkValidAfterDeviceType(
S.Diag(DeviceTypeClause.getBeginLoc(), diag::note_acc_previous_clause_here);
return true;
}
-} // namespace
-SemaOpenACC::SemaOpenACC(Sema &S) : SemaBase(S) {}
+class SemaOpenACCClauseVisitor {
+ SemaOpenACC &SemaRef;
+ ASTContext &Ctx;
+ ArrayRef<const OpenACCClause *> ExistingClauses;
+ bool NotImplemented = false;
-SemaOpenACC::AssociatedStmtRAII::AssociatedStmtRAII(SemaOpenACC &S,
- OpenACCDirectiveKind DK)
- : SemaRef(S), WasInsideComputeConstruct(S.InsideComputeConstruct),
- DirKind(DK) {
- // Compute constructs end up taking their 'loop'.
- if (DirKind == OpenACCDirectiveKind::Parallel ||
- DirKind == OpenACCDirectiveKind::Serial ||
- DirKind == OpenACCDirectiveKind::Kernels) {
- SemaRef.InsideComputeConstruct = true;
- SemaRef.ParentlessLoopConstructs.swap(ParentlessLoopConstructs);
+ OpenACCClause *isNotImplemented() {
+ NotImplemented = true;
+ return nullptr;
}
-}
-SemaOpenACC::AssociatedStmtRAII::~AssociatedStmtRAII() {
- SemaRef.InsideComputeConstruct = WasInsideComputeConstruct;
- if (DirKind == OpenACCDirectiveKind::Parallel ||
- DirKind == OpenACCDirectiveKind::Serial ||
- DirKind == OpenACCDirectiveKind::Kernels) {
- assert(SemaRef.ParentlessLoopConstructs.empty() &&
- "Didn't consume loop construct list?");
- SemaRef.ParentlessLoopConstructs.swap(ParentlessLoopConstructs);
- }
-}
+public:
+ SemaOpenACCClauseVisitor(SemaOpenACC &S,
+ ArrayRef<const OpenACCClause *> ExistingClauses)
+ : SemaRef(S), Ctx(S.getASTContext()), ExistingClauses(ExistingClauses) {}
+ // Once we've implemented everything, we shouldn't need this infrastructure.
+ // But in the meantime, we use this to help decide whether the clause was
+ // handled for this directive.
+ bool diagNotImplemented() { return NotImplemented; }
+
+ OpenACCClause *Visit(SemaOpenACC::OpenACCParsedClause &Clause) {
+ switch (Clause.getClauseKind()) {
+ case OpenACCClauseKind::Gang:
+ case OpenACCClauseKind::Worker:
+ case OpenACCClauseKind::Vector: {
+ // TODO OpenACC: These are only implemented enough for the 'seq' diagnostic;
+ // otherwise they are treated as unimplemented. When we implement these, we
+ // can remove them from here.
-OpenACCClause *
-SemaOpenACC::ActOnClause(ArrayRef<const OpenACCClause *> ExistingClauses,
- OpenACCParsedClause &Clause) {
- if (Clause.getClauseKind() == OpenACCClauseKind::Invalid)
- return nullptr;
+ // OpenACC 3.3 2.9:
+ // A 'gang', 'worker', or 'vector' clause may not appear if a 'seq' clause
+ // appears.
+ const auto *Itr =
+ llvm::find_if(ExistingClauses, llvm::IsaPred<OpenACCSeqClause>);
- // Diagnose that we don't support this clause on this directive.
- if (!doesClauseApplyToDirective(Clause.getDirectiveKind(),
- Clause.getClauseKind())) {
- Diag(Clause.getBeginLoc(), diag::err_acc_clause_appertainment)
- << Clause.getDirectiveKind() << Clause.getClauseKind();
- return nullptr;
+ if (Itr != ExistingClauses.end()) {
+ SemaRef.Diag(Clause.getBeginLoc(), diag::err_acc_clause_cannot_combine)
+ << Clause.getClauseKind() << (*Itr)->getClauseKind();
+ SemaRef.Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
+ }
+ return isNotImplemented();
}
- if (const auto *DevTypeClause =
- llvm::find_if(ExistingClauses,
- [&](const OpenACCClause *C) {
- return isa<OpenACCDeviceTypeClause>(C);
- });
- DevTypeClause != ExistingClauses.end()) {
- if (checkValidAfterDeviceType(
- *this, *cast<OpenACCDeviceTypeClause>(*DevTypeClause), Clause))
- return nullptr;
+#define VISIT_CLAUSE(CLAUSE_NAME) \
+ case OpenACCClauseKind::CLAUSE_NAME: \
+ return Visit##CLAUSE_NAME##Clause(Clause);
+#define CLAUSE_ALIAS(ALIAS, CLAUSE_NAME, DEPRECATED) \
+ case OpenACCClauseKind::ALIAS: \
+ if (DEPRECATED) \
+ SemaRef.Diag(Clause.getBeginLoc(), diag::warn_acc_deprecated_alias_name) \
+ << Clause.getClauseKind() << OpenACCClauseKind::CLAUSE_NAME; \
+ return Visit##CLAUSE_NAME##Clause(Clause);
+#include "clang/Basic/OpenACCClauses.def"
+ default:
+ return isNotImplemented();
+ }
+ llvm_unreachable("Invalid clause kind");
}
- switch (Clause.getClauseKind()) {
- case OpenACCClauseKind::Default: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
+#define VISIT_CLAUSE(CLAUSE_NAME) \
+ OpenACCClause *Visit##CLAUSE_NAME##Clause( \
+ SemaOpenACC::OpenACCParsedClause &Clause);
+#include "clang/Basic/OpenACCClauses.def"
+};
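
For orientation, here is how the two .def macros above expand inside the Visit switch, using one plain clause and one deprecated alias as assumed examples (the PCopy-to-Copy alias matches the deprecation warning this patch removes from ActOnClause):

// VISIT_CLAUSE(Default) expands to:
case OpenACCClauseKind::Default:
  return VisitDefaultClause(Clause);

// CLAUSE_ALIAS(PCopy, Copy, /*DEPRECATED=*/true) expands to:
case OpenACCClauseKind::PCopy:
  if (true)
    SemaRef.Diag(Clause.getBeginLoc(), diag::warn_acc_deprecated_alias_name)
        << Clause.getClauseKind() << OpenACCClauseKind::Copy;
  return VisitCopyClause(Clause);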
- // Don't add an invalid clause to the AST.
- if (Clause.getDefaultClauseKind() == OpenACCDefaultClauseKind::Invalid)
- return nullptr;
-
- // OpenACC 3.3, Section 2.5.4:
- // At most one 'default' clause may appear, and it must have a value of
- // either 'none' or 'present'.
- // Second half of the sentence is diagnosed during parsing.
- if (checkAlreadyHasClauseOfKind(*this, ExistingClauses, Clause))
- return nullptr;
-
- return OpenACCDefaultClause::Create(
- getASTContext(), Clause.getDefaultClauseKind(), Clause.getBeginLoc(),
- Clause.getLParenLoc(), Clause.getEndLoc());
- }
+OpenACCClause *SemaOpenACCClauseVisitor::VisitDefaultClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+
+ // Don't add an invalid clause to the AST.
+ if (Clause.getDefaultClauseKind() == OpenACCDefaultClauseKind::Invalid)
+ return nullptr;
- case OpenACCClauseKind::If: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
+ // OpenACC 3.3, Section 2.5.4:
+ // At most one 'default' clause may appear, and it must have a value of
+ // either 'none' or 'present'.
+ // Second half of the sentence is diagnosed during parsing.
+ if (checkAlreadyHasClauseOfKind(SemaRef, ExistingClauses, Clause))
+ return nullptr;
- // There is no prose in the standard that says duplicates aren't allowed,
- // but this diagnostic is present in other compilers, as well as makes
- // sense.
- if (checkAlreadyHasClauseOfKind(*this, ExistingClauses, Clause))
- return nullptr;
+ return OpenACCDefaultClause::Create(
+ Ctx, Clause.getDefaultClauseKind(), Clause.getBeginLoc(),
+ Clause.getLParenLoc(), Clause.getEndLoc());
+}
- // The parser has ensured that we have a proper condition expr, so there
- // isn't really much to do here.
+OpenACCClause *SemaOpenACCClauseVisitor::VisitIfClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+
+ // There is no prose in the standard that says duplicates aren't allowed,
+ // but other compilers emit this diagnostic as well, and it makes sense.
+ if (checkAlreadyHasClauseOfKind(SemaRef, ExistingClauses, Clause))
+ return nullptr;
- // If the 'if' clause is true, it makes the 'self' clause have no effect,
- // diagnose that here.
- // TODO OpenACC: When we add these two to other constructs, we might not
- // want to warn on this (for example, 'update').
- const auto *Itr =
- llvm::find_if(ExistingClauses, llvm::IsaPred<OpenACCSelfClause>);
- if (Itr != ExistingClauses.end()) {
- Diag(Clause.getBeginLoc(), diag::warn_acc_if_self_conflict);
- Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
- }
+ // The parser has ensured that we have a proper condition expr, so there
+ // isn't really much to do here.
- return OpenACCIfClause::Create(
- getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(),
- Clause.getConditionExpr(), Clause.getEndLoc());
+ // If the 'if' clause is true, it makes the 'self' clause have no effect,
+ // diagnose that here.
+ // TODO OpenACC: When we add these two to other constructs, we might not
+ // want to warn on this (for example, 'update').
+ const auto *Itr =
+ llvm::find_if(ExistingClauses, llvm::IsaPred<OpenACCSelfClause>);
+ if (Itr != ExistingClauses.end()) {
+ SemaRef.Diag(Clause.getBeginLoc(), diag::warn_acc_if_self_conflict);
+ SemaRef.Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
}
- case OpenACCClauseKind::Self: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
-
- // TODO OpenACC: When we implement this for 'update', this takes a
- // 'var-list' instead of a condition expression, so semantics/handling has
- // to happen differently here.
-
- // There is no prose in the standard that says duplicates aren't allowed,
- // but this diagnostic is present in other compilers, as well as makes
- // sense.
- if (checkAlreadyHasClauseOfKind(*this, ExistingClauses, Clause))
- return nullptr;
+ return OpenACCIfClause::Create(Ctx, Clause.getBeginLoc(),
+ Clause.getLParenLoc(),
+ Clause.getConditionExpr(), Clause.getEndLoc());
+}
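
A minimal trigger for the if/self interaction warned about above (a sketch; assumes the file is compiled with -fopenacc):

void usesSelf(int C) {
  // warn_acc_if_self_conflict: when the 'if' condition is true, 'self' is a
  // no-op, so combining them is diagnosed in either order.
  #pragma acc parallel if(C) self(C)
  { }
}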
- // If the 'if' clause is true, it makes the 'self' clause have no effect,
- // diagnose that here.
- // TODO OpenACC: When we add these two to other constructs, we might not
- // want to warn on this (for example, 'update').
- const auto *Itr =
- llvm::find_if(ExistingClauses, llvm::IsaPred<OpenACCIfClause>);
- if (Itr != ExistingClauses.end()) {
- Diag(Clause.getBeginLoc(), diag::warn_acc_if_self_conflict);
- Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
- }
+OpenACCClause *SemaOpenACCClauseVisitor::VisitSelfClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+
+ // TODO OpenACC: When we implement this for 'update', this takes a
+ // 'var-list' instead of a condition expression, so semantics/handling has
+ // to happen differently here.
+
+ // There is no prose in the standard that says duplicates aren't allowed,
+ // but other compilers emit this diagnostic as well, and it makes sense.
+ if (checkAlreadyHasClauseOfKind(SemaRef, ExistingClauses, Clause))
+ return nullptr;
- return OpenACCSelfClause::Create(
- getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(),
- Clause.getConditionExpr(), Clause.getEndLoc());
+ // If the 'if' clause is true, it makes the 'self' clause have no effect,
+ // diagnose that here.
+ // TODO OpenACC: When we add these two to other constructs, we might not
+ // want to warn on this (for example, 'update').
+ const auto *Itr =
+ llvm::find_if(ExistingClauses, llvm::IsaPred<OpenACCIfClause>);
+ if (Itr != ExistingClauses.end()) {
+ SemaRef.Diag(Clause.getBeginLoc(), diag::warn_acc_if_self_conflict);
+ SemaRef.Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
}
- case OpenACCClauseKind::NumGangs: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
+ return OpenACCSelfClause::Create(
+ Ctx, Clause.getBeginLoc(), Clause.getLParenLoc(),
+ Clause.getConditionExpr(), Clause.getEndLoc());
+}
- // There is no prose in the standard that says duplicates aren't allowed,
- // but this diagnostic is present in other compilers, as well as makes
- // sense.
- if (checkAlreadyHasClauseOfKind(*this, ExistingClauses, Clause))
- return nullptr;
+OpenACCClause *SemaOpenACCClauseVisitor::VisitNumGangsClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+
+ // There is no prose in the standard that says duplicates aren't allowed,
+ // but other compilers emit this diagnostic as well, and it makes sense.
+ if (checkAlreadyHasClauseOfKind(SemaRef, ExistingClauses, Clause))
+ return nullptr;
- if (Clause.getIntExprs().empty())
- Diag(Clause.getBeginLoc(), diag::err_acc_num_gangs_num_args)
- << /*NoArgs=*/0;
-
- unsigned MaxArgs =
- (Clause.getDirectiveKind() == OpenACCDirectiveKind::Parallel ||
- Clause.getDirectiveKind() == OpenACCDirectiveKind::ParallelLoop)
- ? 3
- : 1;
- if (Clause.getIntExprs().size() > MaxArgs)
- Diag(Clause.getBeginLoc(), diag::err_acc_num_gangs_num_args)
- << /*NoArgs=*/1 << Clause.getDirectiveKind() << MaxArgs
+ // num_gangs requires at least 1 int expr in all forms. Diagnose here, but
+ // allow us to continue; an empty clause might be useful for future
+ // diagnostics.
+ if (Clause.getIntExprs().empty())
+ SemaRef.Diag(Clause.getBeginLoc(), diag::err_acc_num_gangs_num_args)
+ << /*NoArgs=*/0;
+
+ unsigned MaxArgs =
+ (Clause.getDirectiveKind() == OpenACCDirectiveKind::Parallel ||
+ Clause.getDirectiveKind() == OpenACCDirectiveKind::ParallelLoop)
+ ? 3
+ : 1;
+ // The max number of args differs between parallel and other constructs.
+ // Again, allow us to continue for the purposes of future diagnostics.
+ if (Clause.getIntExprs().size() > MaxArgs)
+ SemaRef.Diag(Clause.getBeginLoc(), diag::err_acc_num_gangs_num_args)
+ << /*NoArgs=*/1 << Clause.getDirectiveKind() << MaxArgs
+ << Clause.getIntExprs().size();
+
+ // OpenACC 3.3 Section 2.5.4:
+ // A reduction clause may not appear on a parallel construct with a
+ // num_gangs clause that has more than one argument.
+ if (Clause.getDirectiveKind() == OpenACCDirectiveKind::Parallel &&
+ Clause.getIntExprs().size() > 1) {
+ auto *Parallel =
+ llvm::find_if(ExistingClauses, llvm::IsaPred<OpenACCReductionClause>);
+
+ if (Parallel != ExistingClauses.end()) {
+ SemaRef.Diag(Clause.getBeginLoc(),
+ diag::err_acc_reduction_num_gangs_conflict)
<< Clause.getIntExprs().size();
-
- // OpenACC 3.3 Section 2.5.4:
- // A reduction clause may not appear on a parallel construct with a
- // num_gangs clause that has more than one argument.
- if (Clause.getDirectiveKind() == OpenACCDirectiveKind::Parallel &&
- Clause.getIntExprs().size() > 1) {
- auto *Parallel =
- llvm::find_if(ExistingClauses, llvm::IsaPred<OpenACCReductionClause>);
-
- if (Parallel != ExistingClauses.end()) {
- Diag(Clause.getBeginLoc(), diag::err_acc_reduction_num_gangs_conflict)
- << Clause.getIntExprs().size();
- Diag((*Parallel)->getBeginLoc(), diag::note_acc_previous_clause_here);
- return nullptr;
- }
+ SemaRef.Diag((*Parallel)->getBeginLoc(),
+ diag::note_acc_previous_clause_here);
+ return nullptr;
}
-
- // Create the AST node for the clause even if the number of expressions is
- // incorrect.
- return OpenACCNumGangsClause::Create(
- getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(),
- Clause.getIntExprs(), Clause.getEndLoc());
- break;
}
- case OpenACCClauseKind::NumWorkers: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
+ return OpenACCNumGangsClause::Create(
+ Ctx, Clause.getBeginLoc(), Clause.getLParenLoc(), Clause.getIntExprs(),
+ Clause.getEndLoc());
+}
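
A sketch of the num_gangs restriction enforced above (assumes -fopenacc; the rejected line is kept commented out):

void usesNumGangs(int *A, int N) {
  int Sum = 0;
  // err_acc_reduction_num_gangs_conflict: a 'parallel' construct may not
  // combine a multi-argument num_gangs with a reduction clause.
  // #pragma acc parallel num_gangs(8, 4) reduction(+: Sum)
  (void)Sum;
}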
- // There is no prose in the standard that says duplicates aren't allowed,
- // but this diagnostic is present in other compilers, as well as makes
- // sense.
- if (checkAlreadyHasClauseOfKind(*this, ExistingClauses, Clause))
- return nullptr;
+OpenACCClause *SemaOpenACCClauseVisitor::VisitNumWorkersClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+
+ // There is no prose in the standard that says duplicates aren't allowed,
+ // but other compilers emit this diagnostic as well, and it makes sense.
+ if (checkAlreadyHasClauseOfKind(SemaRef, ExistingClauses, Clause))
+ return nullptr;
- assert(Clause.getIntExprs().size() == 1 &&
- "Invalid number of expressions for NumWorkers");
- return OpenACCNumWorkersClause::Create(
- getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(),
- Clause.getIntExprs()[0], Clause.getEndLoc());
- }
- case OpenACCClauseKind::VectorLength: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
+ assert(Clause.getIntExprs().size() == 1 &&
+ "Invalid number of expressions for NumWorkers");
+ return OpenACCNumWorkersClause::Create(
+ Ctx, Clause.getBeginLoc(), Clause.getLParenLoc(), Clause.getIntExprs()[0],
+ Clause.getEndLoc());
+}
- // There is no prose in the standard that says duplicates aren't allowed,
- // but this diagnostic is present in other compilers, as well as makes
- // sense.
- if (checkAlreadyHasClauseOfKind(*this, ExistingClauses, Clause))
- return nullptr;
+OpenACCClause *SemaOpenACCClauseVisitor::VisitVectorLengthClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+
+ // There is no prose in the standard that says duplicates aren't allowed,
+ // but other compilers emit this diagnostic as well, and it makes sense.
+ if (checkAlreadyHasClauseOfKind(SemaRef, ExistingClauses, Clause))
+ return nullptr;
- assert(Clause.getIntExprs().size() == 1 &&
- "Invalid number of expressions for VectorLength");
- return OpenACCVectorLengthClause::Create(
- getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(),
- Clause.getIntExprs()[0], Clause.getEndLoc());
- }
- case OpenACCClauseKind::Async: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
+ assert(Clause.getIntExprs().size() == 1 &&
+ "Invalid number of expressions for NumWorkers");
+ return OpenACCVectorLengthClause::Create(
+ Ctx, Clause.getBeginLoc(), Clause.getLParenLoc(), Clause.getIntExprs()[0],
+ Clause.getEndLoc());
+}
- // There is no prose in the standard that says duplicates aren't allowed,
- // but this diagnostic is present in other compilers, as well as makes
- // sense.
- if (checkAlreadyHasClauseOfKind(*this, ExistingClauses, Clause))
- return nullptr;
+OpenACCClause *SemaOpenACCClauseVisitor::VisitAsyncClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+
+ // There is no prose in the standard that says duplicates aren't allowed,
+ // but other compilers emit this diagnostic as well, and it makes sense.
+ if (checkAlreadyHasClauseOfKind(SemaRef, ExistingClauses, Clause))
+ return nullptr;
- assert(Clause.getNumIntExprs() < 2 &&
- "Invalid number of expressions for Async");
+ assert(Clause.getNumIntExprs() < 2 &&
+ "Invalid number of expressions for Async");
+ return OpenACCAsyncClause::Create(
+ Ctx, Clause.getBeginLoc(), Clause.getLParenLoc(),
+ Clause.getNumIntExprs() != 0 ? Clause.getIntExprs()[0] : nullptr,
+ Clause.getEndLoc());
+}
- return OpenACCAsyncClause::Create(
- getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(),
- Clause.getNumIntExprs() != 0 ? Clause.getIntExprs()[0] : nullptr,
- Clause.getEndLoc());
- }
- case OpenACCClauseKind::Private: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
+OpenACCClause *SemaOpenACCClauseVisitor::VisitPrivateClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' and 'loop'
+ // constructs, and 'compute'/'loop' constructs are the only constructs that
+ // can do anything with this yet, so skip/treat as unimplemented in this
+ // case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()) &&
+ Clause.getDirectiveKind() != OpenACCDirectiveKind::Loop)
+ return isNotImplemented();
+
+ // ActOnVar ensured that everything is a valid variable reference, so there
+ // really isn't anything to do here. GCC does some duplicate-finding, though
+ // it isn't apparent in the standard where this is justified.
+
+ return OpenACCPrivateClause::Create(Ctx, Clause.getBeginLoc(),
+ Clause.getLParenLoc(),
+ Clause.getVarList(), Clause.getEndLoc());
+}
- // ActOnVar ensured that everything is a valid variable reference, so there
- // really isn't anything to do here. GCC does some duplicate-finding, though
- // it isn't apparent in the standard where this is justified.
+OpenACCClause *SemaOpenACCClauseVisitor::VisitFirstPrivateClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+
+ // ActOnVar ensured that everything is a valid variable reference, so there
+ // really isn't anything to do here. GCC does some duplicate-finding, though
+ // it isn't apparent in the standard where this is justified.
+
+ return OpenACCFirstPrivateClause::Create(
+ Ctx, Clause.getBeginLoc(), Clause.getLParenLoc(), Clause.getVarList(),
+ Clause.getEndLoc());
+}
- return OpenACCPrivateClause::Create(
- getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(),
- Clause.getVarList(), Clause.getEndLoc());
- }
- case OpenACCClauseKind::FirstPrivate: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
+OpenACCClause *SemaOpenACCClauseVisitor::VisitNoCreateClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+ // ActOnVar ensured that everything is a valid variable reference, so there
+ // really isn't anything to do here. GCC does some duplicate-finding, though
+ // it isn't apparent in the standard where this is justified.
+
+ return OpenACCNoCreateClause::Create(Ctx, Clause.getBeginLoc(),
+ Clause.getLParenLoc(),
+ Clause.getVarList(), Clause.getEndLoc());
+}
- // ActOnVar ensured that everything is a valid variable reference, so there
- // really isn't anything to do here. GCC does some duplicate-finding, though
- // it isn't apparent in the standard where this is justified.
+OpenACCClause *SemaOpenACCClauseVisitor::VisitPresentClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+ // ActOnVar ensured that everything is a valid variable reference, so there
+ // really isn't anything to do here. GCC does some duplicate-finding, though
+ // it isn't apparent in the standard where this is justified.
+
+ return OpenACCPresentClause::Create(Ctx, Clause.getBeginLoc(),
+ Clause.getLParenLoc(),
+ Clause.getVarList(), Clause.getEndLoc());
+}
- return OpenACCFirstPrivateClause::Create(
- getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(),
- Clause.getVarList(), Clause.getEndLoc());
- }
- case OpenACCClauseKind::NoCreate: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
+OpenACCClause *SemaOpenACCClauseVisitor::VisitCopyClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+ // ActOnVar ensured that everything is a valid variable reference, so there
+ // really isn't anything to do here. GCC does some duplicate-finding, though
+ // it isn't apparent in the standard where this is justified.
+
+ return OpenACCCopyClause::Create(
+ Ctx, Clause.getClauseKind(), Clause.getBeginLoc(), Clause.getLParenLoc(),
+ Clause.getVarList(), Clause.getEndLoc());
+}
- // ActOnVar ensured that everything is a valid variable reference, so there
- // really isn't anything to do here. GCC does some duplicate-finding, though
- // it isn't apparent in the standard where this is justified.
+OpenACCClause *SemaOpenACCClauseVisitor::VisitCopyInClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+ // ActOnVar ensured that everything is a valid variable reference, so there
+ // really isn't anything to do here. GCC does some duplicate-finding, though
+ // it isn't apparent in the standard where this is justified.
+
+ return OpenACCCopyInClause::Create(
+ Ctx, Clause.getClauseKind(), Clause.getBeginLoc(), Clause.getLParenLoc(),
+ Clause.isReadOnly(), Clause.getVarList(), Clause.getEndLoc());
+}
- return OpenACCNoCreateClause::Create(
- getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(),
- Clause.getVarList(), Clause.getEndLoc());
- }
- case OpenACCClauseKind::Present: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
+OpenACCClause *SemaOpenACCClauseVisitor::VisitCopyOutClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+ // ActOnVar ensured that everything is a valid variable reference, so there
+ // really isn't anything to do here. GCC does some duplicate-finding, though
+ // it isn't apparent in the standard where this is justified.
+
+ return OpenACCCopyOutClause::Create(
+ Ctx, Clause.getClauseKind(), Clause.getBeginLoc(), Clause.getLParenLoc(),
+ Clause.isZero(), Clause.getVarList(), Clause.getEndLoc());
+}
- // ActOnVar ensured that everything is a valid variable reference, so there
- // really isn't anything to do here. GCC does some duplicate-finding, though
- // it isn't apparent in the standard where this is justified.
+OpenACCClause *SemaOpenACCClauseVisitor::VisitCreateClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+ // ActOnVar ensured that everything is a valid variable reference, so there
+ // really isn't anything to do here. GCC does some duplicate-finding, though
+ // it isn't apparent in the standard where this is justified.
+
+ return OpenACCCreateClause::Create(
+ Ctx, Clause.getClauseKind(), Clause.getBeginLoc(), Clause.getLParenLoc(),
+ Clause.isZero(), Clause.getVarList(), Clause.getEndLoc());
+}
- return OpenACCPresentClause::Create(
- getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(),
- Clause.getVarList(), Clause.getEndLoc());
- }
- case OpenACCClauseKind::PresentOrCopy:
- case OpenACCClauseKind::PCopy:
- Diag(Clause.getBeginLoc(), diag::warn_acc_deprecated_alias_name)
- << Clause.getClauseKind() << OpenACCClauseKind::Copy;
- LLVM_FALLTHROUGH;
- case OpenACCClauseKind::Copy: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
+OpenACCClause *SemaOpenACCClauseVisitor::VisitAttachClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+
+ // ActOnVar ensured that everything is a valid variable reference, but we
+ // still have to make sure it is a pointer type.
+ llvm::SmallVector<Expr *> VarList{Clause.getVarList().begin(),
+ Clause.getVarList().end()};
+ VarList.erase(std::remove_if(VarList.begin(), VarList.end(),
+ [&](Expr *E) {
+ return SemaRef.CheckVarIsPointerType(
+ OpenACCClauseKind::Attach, E);
+ }),
+ VarList.end());
+ Clause.setVarListDetails(VarList,
+ /*IsReadOnly=*/false, /*IsZero=*/false);
+ return OpenACCAttachClause::Create(Ctx, Clause.getBeginLoc(),
+ Clause.getLParenLoc(), Clause.getVarList(),
+ Clause.getEndLoc());
+}
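
A sketch of the pointer-type filtering above (assumes -fopenacc):

void usesAttach() {
  int X = 0;
  int *P = &X;
  #pragma acc parallel attach(P) // OK: 'P' has pointer type
  { }
  // Diagnosed and dropped from the clause by CheckVarIsPointerType:
  // #pragma acc parallel attach(X)
}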
- // ActOnVar ensured that everything is a valid variable reference, so there
- // really isn't anything to do here. GCC does some duplicate-finding, though
- // it isn't apparent in the standard where this is justified.
+OpenACCClause *SemaOpenACCClauseVisitor::VisitDevicePtrClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+
+ // ActOnVar ensured that everything is a valid variable reference, but we
+ // still have to make sure it is a pointer type.
+ llvm::SmallVector<Expr *> VarList{Clause.getVarList().begin(),
+ Clause.getVarList().end()};
+ VarList.erase(std::remove_if(VarList.begin(), VarList.end(),
+ [&](Expr *E) {
+ return SemaRef.CheckVarIsPointerType(
+ OpenACCClauseKind::DevicePtr, E);
+ }),
+ VarList.end());
+ Clause.setVarListDetails(VarList,
+ /*IsReadOnly=*/false, /*IsZero=*/false);
+
+ return OpenACCDevicePtrClause::Create(
+ Ctx, Clause.getBeginLoc(), Clause.getLParenLoc(), Clause.getVarList(),
+ Clause.getEndLoc());
+}
- return OpenACCCopyClause::Create(
- getASTContext(), Clause.getClauseKind(), Clause.getBeginLoc(),
- Clause.getLParenLoc(), Clause.getVarList(), Clause.getEndLoc());
- }
- case OpenACCClauseKind::PresentOrCopyIn:
- case OpenACCClauseKind::PCopyIn:
- Diag(Clause.getBeginLoc(), diag::warn_acc_deprecated_alias_name)
- << Clause.getClauseKind() << OpenACCClauseKind::CopyIn;
- LLVM_FALLTHROUGH;
- case OpenACCClauseKind::CopyIn: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
+OpenACCClause *SemaOpenACCClauseVisitor::VisitWaitClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only construct that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+
+ return OpenACCWaitClause::Create(
+ Ctx, Clause.getBeginLoc(), Clause.getLParenLoc(), Clause.getDevNumExpr(),
+ Clause.getQueuesLoc(), Clause.getQueueIdExprs(), Clause.getEndLoc());
+}
- // ActOnVar ensured that everything is a valid variable reference, so there
- // really isn't anything to do here. GCC does some duplicate-finding, though
- // it isn't apparent in the standard where this is justified.
+OpenACCClause *SemaOpenACCClauseVisitor::VisitDeviceTypeClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' and 'loop'
+ // constructs, and 'compute'/'loop' constructs are the only constructs that
+ // can do anything with this yet, so skip/treat as unimplemented in this
+ // case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()) &&
+ Clause.getDirectiveKind() != OpenACCDirectiveKind::Loop)
+ return isNotImplemented();
+
+ // TODO OpenACC: Once we get enough of the CodeGen implemented that we have
+ // a source for the list of valid architectures, we need to warn on unknown
+ // identifiers here.
+
+ return OpenACCDeviceTypeClause::Create(
+ Ctx, Clause.getClauseKind(), Clause.getBeginLoc(), Clause.getLParenLoc(),
+ Clause.getDeviceTypeArchitectures(), Clause.getEndLoc());
+}
- return OpenACCCopyInClause::Create(
- getASTContext(), Clause.getClauseKind(), Clause.getBeginLoc(),
- Clause.getLParenLoc(), Clause.isReadOnly(), Clause.getVarList(),
- Clause.getEndLoc());
+OpenACCClause *SemaOpenACCClauseVisitor::VisitAutoClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'loop' constructs, and it is
+ // the only construct that can do anything with this, so skip/treat as
+ // unimplemented for the combined constructs.
+ if (Clause.getDirectiveKind() != OpenACCDirectiveKind::Loop)
+ return isNotImplemented();
+
+ // OpenACC 3.3 2.9:
+ // Only one of the seq, independent, and auto clauses may appear.
+ const auto *Itr =
+ llvm::find_if(ExistingClauses,
+ llvm::IsaPred<OpenACCIndependentClause, OpenACCSeqClause>);
+ if (Itr != ExistingClauses.end()) {
+ SemaRef.Diag(Clause.getBeginLoc(), diag::err_acc_loop_spec_conflict)
+ << Clause.getClauseKind() << Clause.getDirectiveKind();
+ SemaRef.Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
+ return nullptr;
}
- case OpenACCClauseKind::PresentOrCopyOut:
- case OpenACCClauseKind::PCopyOut:
- Diag(Clause.getBeginLoc(), diag::warn_acc_deprecated_alias_name)
- << Clause.getClauseKind() << OpenACCClauseKind::CopyOut;
- LLVM_FALLTHROUGH;
- case OpenACCClauseKind::CopyOut: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
- // ActOnVar ensured that everything is a valid variable reference, so there
- // really isn't anything to do here. GCC does some duplicate-finding, though
- // it isn't apparent in the standard where this is justified.
+ return OpenACCAutoClause::Create(Ctx, Clause.getBeginLoc(),
+ Clause.getEndLoc());
+}
- return OpenACCCopyOutClause::Create(
- getASTContext(), Clause.getClauseKind(), Clause.getBeginLoc(),
- Clause.getLParenLoc(), Clause.isZero(), Clause.getVarList(),
- Clause.getEndLoc());
+OpenACCClause *SemaOpenACCClauseVisitor::VisitIndependentClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'loop' constructs, and it is
+ // the only construct that can do anything with this, so skip/treat as
+ // unimplemented for the combined constructs.
+ if (Clause.getDirectiveKind() != OpenACCDirectiveKind::Loop)
+ return isNotImplemented();
+
+ // OpenACC 3.3 2.9:
+ // Only one of the seq, independent, and auto clauses may appear.
+ const auto *Itr = llvm::find_if(
+ ExistingClauses, llvm::IsaPred<OpenACCAutoClause, OpenACCSeqClause>);
+ if (Itr != ExistingClauses.end()) {
+ SemaRef.Diag(Clause.getBeginLoc(), diag::err_acc_loop_spec_conflict)
+ << Clause.getClauseKind() << Clause.getDirectiveKind();
+ SemaRef.Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
+ return nullptr;
}
- case OpenACCClauseKind::PresentOrCreate:
- case OpenACCClauseKind::PCreate:
- Diag(Clause.getBeginLoc(), diag::warn_acc_deprecated_alias_name)
- << Clause.getClauseKind() << OpenACCClauseKind::Create;
- LLVM_FALLTHROUGH;
- case OpenACCClauseKind::Create: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
-
- // ActOnVar ensured that everything is a valid variable reference, so there
- // really isn't anything to do here. GCC does some duplicate-finding, though
- // it isn't apparent in the standard where this is justified.
- return OpenACCCreateClause::Create(getASTContext(), Clause.getClauseKind(),
- Clause.getBeginLoc(),
- Clause.getLParenLoc(), Clause.isZero(),
- Clause.getVarList(), Clause.getEndLoc());
- }
- case OpenACCClauseKind::Attach: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
+ return OpenACCIndependentClause::Create(Ctx, Clause.getBeginLoc(),
+ Clause.getEndLoc());
+}
- // ActOnVar ensured that everything is a valid variable reference, but we
- // still have to make sure it is a pointer type.
- llvm::SmallVector<Expr *> VarList{Clause.getVarList().begin(),
- Clause.getVarList().end()};
- VarList.erase(std::remove_if(VarList.begin(), VarList.end(), [&](Expr *E) {
- return CheckVarIsPointerType(OpenACCClauseKind::Attach, E);
- }), VarList.end());
- Clause.setVarListDetails(VarList,
- /*IsReadOnly=*/false, /*IsZero=*/false);
-
- return OpenACCAttachClause::Create(getASTContext(), Clause.getBeginLoc(),
- Clause.getLParenLoc(),
- Clause.getVarList(), Clause.getEndLoc());
+OpenACCClause *SemaOpenACCClauseVisitor::VisitSeqClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'loop' constructs, and it is
+ // the only construct that can do anything with this, so skip/treat as
+ // unimplemented for the combined constructs.
+ if (Clause.getDirectiveKind() != OpenACCDirectiveKind::Loop)
+ return isNotImplemented();
+
+ // OpenACC 3.3 2.9:
+ // Only one of the seq, independent, and auto clauses may appear.
+ const auto *Itr =
+ llvm::find_if(ExistingClauses,
+ llvm::IsaPred<OpenACCAutoClause, OpenACCIndependentClause>);
+ if (Itr != ExistingClauses.end()) {
+ SemaRef.Diag(Clause.getBeginLoc(), diag::err_acc_loop_spec_conflict)
+ << Clause.getClauseKind() << Clause.getDirectiveKind();
+ SemaRef.Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
+ return nullptr;
}
- case OpenACCClauseKind::DevicePtr: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
- // ActOnVar ensured that everything is a valid variable reference, but we
- // still have to make sure it is a pointer type.
- llvm::SmallVector<Expr *> VarList{Clause.getVarList().begin(),
- Clause.getVarList().end()};
- VarList.erase(std::remove_if(VarList.begin(), VarList.end(), [&](Expr *E) {
- return CheckVarIsPointerType(OpenACCClauseKind::DevicePtr, E);
- }), VarList.end());
- Clause.setVarListDetails(VarList,
- /*IsReadOnly=*/false, /*IsZero=*/false);
-
- return OpenACCDevicePtrClause::Create(
- getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(),
- Clause.getVarList(), Clause.getEndLoc());
- }
- case OpenACCClauseKind::Wait: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
+ // OpenACC 3.3 2.9:
+ // A 'gang', 'worker', or 'vector' clause may not appear if a 'seq' clause
+ // appears.
+ Itr = llvm::find_if(ExistingClauses,
+ llvm::IsaPred<OpenACCGangClause, OpenACCWorkerClause,
+ OpenACCVectorClause>);
- return OpenACCWaitClause::Create(
- getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(),
- Clause.getDevNumExpr(), Clause.getQueuesLoc(), Clause.getQueueIdExprs(),
- Clause.getEndLoc());
+ if (Itr != ExistingClauses.end()) {
+ SemaRef.Diag(Clause.getBeginLoc(), diag::err_acc_clause_cannot_combine)
+ << Clause.getClauseKind() << (*Itr)->getClauseKind();
+ SemaRef.Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
+ return nullptr;
}
- case OpenACCClauseKind::DType:
- case OpenACCClauseKind::DeviceType: {
- // Restrictions only properly implemented on 'compute' and 'loop'
- // constructs, and 'compute'/'loop' constructs are the only construct that
- // can do anything with this yet, so skip/treat as unimplemented in this
- // case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()) &&
- Clause.getDirectiveKind() != OpenACCDirectiveKind::Loop)
- break;
- // TODO OpenACC: Once we get enough of the CodeGen implemented that we have
- // a source for the list of valid architectures, we need to warn on unknown
- // identifiers here.
-
- return OpenACCDeviceTypeClause::Create(
- getASTContext(), Clause.getClauseKind(), Clause.getBeginLoc(),
- Clause.getLParenLoc(), Clause.getDeviceTypeArchitectures(),
- Clause.getEndLoc());
- }
- case OpenACCClauseKind::Auto: {
- // Restrictions only properly implemented on 'loop' constructs, and it is
- // the only construct that can do anything with this, so skip/treat as
- // unimplemented for the combined constructs.
- if (Clause.getDirectiveKind() != OpenACCDirectiveKind::Loop)
- break;
+ // TODO OpenACC: 2.9 ~ line 2010 specifies that the associated loop has some
+ // restrictions when there is a 'seq' clause in place. We probably need to
+ // implement that.
+ return OpenACCSeqClause::Create(Ctx, Clause.getBeginLoc(),
+ Clause.getEndLoc());
+}
- // OpenACC 3.3 2.9:
- // Only one of the seq, independent, and auto clauses may appear.
- const auto *Itr = llvm::find_if(
- ExistingClauses,
- llvm::IsaPred<OpenACCIndependentClause, OpenACCSeqClause>);
- if (Itr != ExistingClauses.end()) {
- Diag(Clause.getBeginLoc(), diag::err_acc_loop_spec_conflict)
- << Clause.getClauseKind() << Clause.getDirectiveKind();
- Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
+OpenACCClause *SemaOpenACCClauseVisitor::VisitReductionClause(
+ SemaOpenACC::OpenACCParsedClause &Clause) {
+ // Restrictions only properly implemented on 'compute' constructs, and
+  // 'compute' constructs are the only constructs that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ return isNotImplemented();
+
+ // OpenACC 3.3 Section 2.5.4:
+ // A reduction clause may not appear on a parallel construct with a
+ // num_gangs clause that has more than one argument.
+ if (Clause.getDirectiveKind() == OpenACCDirectiveKind::Parallel) {
+ auto NumGangsClauses = llvm::make_filter_range(
+ ExistingClauses, llvm::IsaPred<OpenACCNumGangsClause>);
+
+ for (auto *NGC : NumGangsClauses) {
+ unsigned NumExprs =
+ cast<OpenACCNumGangsClause>(NGC)->getIntExprs().size();
+
+ if (NumExprs > 1) {
+ SemaRef.Diag(Clause.getBeginLoc(),
+ diag::err_acc_reduction_num_gangs_conflict)
+ << NumExprs;
+ SemaRef.Diag(NGC->getBeginLoc(), diag::note_acc_previous_clause_here);
+ return nullptr;
+ }
}
-
- return OpenACCAutoClause::Create(getASTContext(), Clause.getBeginLoc(),
- Clause.getEndLoc());
}
- case OpenACCClauseKind::Independent: {
- // Restrictions only properly implemented on 'loop' constructs, and it is
- // the only construct that can do anything with this, so skip/treat as
- // unimplemented for the combined constructs.
- if (Clause.getDirectiveKind() != OpenACCDirectiveKind::Loop)
- break;
- // OpenACC 3.3 2.9:
- // Only one of the seq, independent, and auto clauses may appear.
- const auto *Itr = llvm::find_if(
- ExistingClauses, llvm::IsaPred<OpenACCAutoClause, OpenACCSeqClause>);
- if (Itr != ExistingClauses.end()) {
- Diag(Clause.getBeginLoc(), diag::err_acc_loop_spec_conflict)
- << Clause.getClauseKind() << Clause.getDirectiveKind();
- Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
- }
+ SmallVector<Expr *> ValidVars;
+
+ for (Expr *Var : Clause.getVarList()) {
+ ExprResult Res = SemaRef.CheckReductionVar(Var);
- return OpenACCIndependentClause::Create(
- getASTContext(), Clause.getBeginLoc(), Clause.getEndLoc());
+ if (Res.isUsable())
+ ValidVars.push_back(Res.get());
}
- case OpenACCClauseKind::Seq: {
- // Restrictions only properly implemented on 'loop' constructs, and it is
- // the only construct that can do anything with this, so skip/treat as
- // unimplemented for the combined constructs.
- if (Clause.getDirectiveKind() != OpenACCDirectiveKind::Loop)
- break;
- // OpenACC 3.3 2.9:
- // Only one of the seq, independent, and auto clauses may appear.
- const auto *Itr = llvm::find_if(
- ExistingClauses,
- llvm::IsaPred<OpenACCAutoClause, OpenACCIndependentClause>);
- if (Itr != ExistingClauses.end()) {
- Diag(Clause.getBeginLoc(), diag::err_acc_loop_spec_conflict)
- << Clause.getClauseKind() << Clause.getDirectiveKind();
- Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
- }
+ return OpenACCReductionClause::Create(
+ Ctx, Clause.getBeginLoc(), Clause.getLParenLoc(), Clause.getReductionOp(),
+ ValidVars, Clause.getEndLoc());
+}
- // OpenACC 3.3 2.9:
- // A 'gang', 'worker', or 'vector' clause may not appear if a 'seq' clause
- // appears.
- Itr = llvm::find_if(ExistingClauses,
- llvm::IsaPred<OpenACCGangClause, OpenACCWorkerClause,
- OpenACCVectorClause>);
+} // namespace
- if (Itr != ExistingClauses.end()) {
- Diag(Clause.getBeginLoc(), diag::err_acc_clause_cannot_combine)
- << Clause.getClauseKind() << (*Itr)->getClauseKind();
- Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
- }
+SemaOpenACC::SemaOpenACC(Sema &S) : SemaBase(S) {}
- // TODO OpenACC: 2.9 ~ line 2010 specifies that the associated loop has some
- // restrictions when there is a 'seq' clause in place. We probably need to
- // implement that.
- return OpenACCSeqClause::Create(getASTContext(), Clause.getBeginLoc(),
- Clause.getEndLoc());
+SemaOpenACC::AssociatedStmtRAII::AssociatedStmtRAII(SemaOpenACC &S,
+ OpenACCDirectiveKind DK)
+ : SemaRef(S), WasInsideComputeConstruct(S.InsideComputeConstruct),
+ DirKind(DK) {
+  // Compute constructs end up taking ownership of their associated 'loop'
+  // constructs.
+ if (DirKind == OpenACCDirectiveKind::Parallel ||
+ DirKind == OpenACCDirectiveKind::Serial ||
+ DirKind == OpenACCDirectiveKind::Kernels) {
+ SemaRef.InsideComputeConstruct = true;
+ SemaRef.ParentlessLoopConstructs.swap(ParentlessLoopConstructs);
}
- case OpenACCClauseKind::Gang:
- case OpenACCClauseKind::Worker:
- case OpenACCClauseKind::Vector: {
- // OpenACC 3.3 2.9:
- // A 'gang', 'worker', or 'vector' clause may not appear if a 'seq' clause
- // appears.
- const auto *Itr =
- llvm::find_if(ExistingClauses, llvm::IsaPred<OpenACCSeqClause>);
+}
- if (Itr != ExistingClauses.end()) {
- Diag(Clause.getBeginLoc(), diag::err_acc_clause_cannot_combine)
- << Clause.getClauseKind() << (*Itr)->getClauseKind();
- Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here);
- }
- // Not yet implemented, so immediately drop to the 'not yet implemented'
- // diagnostic.
- break;
+SemaOpenACC::AssociatedStmtRAII::~AssociatedStmtRAII() {
+ SemaRef.InsideComputeConstruct = WasInsideComputeConstruct;
+ if (DirKind == OpenACCDirectiveKind::Parallel ||
+ DirKind == OpenACCDirectiveKind::Serial ||
+ DirKind == OpenACCDirectiveKind::Kernels) {
+ assert(SemaRef.ParentlessLoopConstructs.empty() &&
+ "Didn't consume loop construct list?");
+ SemaRef.ParentlessLoopConstructs.swap(ParentlessLoopConstructs);
}
- case OpenACCClauseKind::Reduction: {
- // Restrictions only properly implemented on 'compute' constructs, and
- // 'compute' constructs are the only construct that can do anything with
- // this yet, so skip/treat as unimplemented in this case.
- if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
- break;
-
- // OpenACC 3.3 Section 2.5.4:
- // A reduction clause may not appear on a parallel construct with a
- // num_gangs clause that has more than one argument.
- if (Clause.getDirectiveKind() == OpenACCDirectiveKind::Parallel) {
- auto NumGangsClauses = llvm::make_filter_range(
- ExistingClauses, llvm::IsaPred<OpenACCNumGangsClause>);
-
- for (auto *NGC : NumGangsClauses) {
- unsigned NumExprs =
- cast<OpenACCNumGangsClause>(NGC)->getIntExprs().size();
-
- if (NumExprs > 1) {
- Diag(Clause.getBeginLoc(), diag::err_acc_reduction_num_gangs_conflict)
- << NumExprs;
- Diag(NGC->getBeginLoc(), diag::note_acc_previous_clause_here);
- return nullptr;
- }
- }
- }
-
- SmallVector<Expr *> ValidVars;
-
- for (Expr *Var : Clause.getVarList()) {
- ExprResult Res = CheckReductionVar(Var);
+}
- if (Res.isUsable())
- ValidVars.push_back(Res.get());
- }
+OpenACCClause *
+SemaOpenACC::ActOnClause(ArrayRef<const OpenACCClause *> ExistingClauses,
+ OpenACCParsedClause &Clause) {
+ if (Clause.getClauseKind() == OpenACCClauseKind::Invalid)
+ return nullptr;
- return OpenACCReductionClause::Create(
- getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(),
- Clause.getReductionOp(), ValidVars, Clause.getEndLoc());
+ // Diagnose that we don't support this clause on this directive.
+ if (!doesClauseApplyToDirective(Clause.getDirectiveKind(),
+ Clause.getClauseKind())) {
+ Diag(Clause.getBeginLoc(), diag::err_acc_clause_appertainment)
+ << Clause.getDirectiveKind() << Clause.getClauseKind();
+ return nullptr;
}
- default:
- break;
+
+ if (const auto *DevTypeClause =
+ llvm::find_if(ExistingClauses,
+ [&](const OpenACCClause *C) {
+ return isa<OpenACCDeviceTypeClause>(C);
+ });
+ DevTypeClause != ExistingClauses.end()) {
+ if (checkValidAfterDeviceType(
+ *this, *cast<OpenACCDeviceTypeClause>(*DevTypeClause), Clause))
+ return nullptr;
}
- Diag(Clause.getBeginLoc(), diag::warn_acc_clause_unimplemented)
- << Clause.getClauseKind();
- return nullptr;
+ SemaOpenACCClauseVisitor Visitor{*this, ExistingClauses};
+ OpenACCClause *Result = Visitor.Visit(Clause);
+ assert((!Result || Result->getClauseKind() == Clause.getClauseKind()) &&
+ "Created wrong clause?");
+
+ if (Visitor.diagNotImplemented())
+ Diag(Clause.getBeginLoc(), diag::warn_acc_clause_unimplemented)
+ << Clause.getClauseKind();
+
+ return Result;
}
/// OpenACC 3.3 section 2.5.15:
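For reference, a minimal sketch of inputs exercising the loop-clause rules checked above; the function and variable names are illustrative, not taken from this patch's tests:

    void f(int *a) {
      int sum = 0;
      // OpenACC 3.3 2.9: only one of 'seq', 'independent', and 'auto' may
      // appear, so this should produce err_acc_loop_spec_conflict.
    #pragma acc loop seq independent
      for (int i = 0; i < 8; ++i)
        a[i] = i;
      // OpenACC 3.3 2.5.4: 'reduction' may not appear on a 'parallel' with a
      // num_gangs clause that has more than one argument
      // (err_acc_reduction_num_gangs_conflict).
    #pragma acc parallel num_gangs(2, 4) reduction(+: sum)
      ;
    }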
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index 95dd356..3bfda09 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -14172,13 +14172,6 @@ TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
if (TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
&ArgumentChanged))
return ExprError();
-
- if (E->isListInitialization() && !E->isStdInitListInitialization()) {
- ExprResult Res = RebuildInitList(E->getBeginLoc(), Args, E->getEndLoc());
- if (Res.isInvalid())
- return ExprError();
- Args = {Res.get()};
- }
}
if (!getDerived().AlwaysRebuild() &&
@@ -14190,9 +14183,12 @@ TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
return SemaRef.MaybeBindToTemporary(E);
}
+ // FIXME: We should just pass E->isListInitialization(), but we're not
+ // prepared to handle list-initialization without a child InitListExpr.
SourceLocation LParenLoc = T->getTypeLoc().getEndLoc();
return getDerived().RebuildCXXTemporaryObjectExpr(
- T, LParenLoc, Args, E->getEndLoc(), E->isListInitialization());
+ T, LParenLoc, Args, E->getEndLoc(),
+ /*ListInitialization=*/LParenLoc.isInvalid());
}
template<typename Derived>
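The FIXME above hinges on the two spellings of a temporary-object expression; a sketch of the distinction, with a deliberately simple class:

    struct T { T(int, int); };
    T a = T(1, 2); // CXXTemporaryObjectExpr, isListInitialization() == false
    T b = T{1, 2}; // isListInitialization() == true and no '(' token, so the
                   // rebuilt node's LParenLoc is invalid -- which is what the
                   // /*ListInitialization=*/LParenLoc.isInvalid() check keys on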
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 290d966..197d673 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -1971,45 +1971,33 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
ExplodedNodeSet Tmp;
StmtNodeBuilder Bldr2(PreVisit, Tmp, *currBldrCtx);
- bool HasRewrittenInit = false;
- const Expr *ArgE = nullptr;
- if (const auto *DefE = dyn_cast<CXXDefaultArgExpr>(S)) {
+ const Expr *ArgE;
+ if (const auto *DefE = dyn_cast<CXXDefaultArgExpr>(S))
ArgE = DefE->getExpr();
- HasRewrittenInit = DefE->hasRewrittenInit();
- } else if (const auto *DefE = dyn_cast<CXXDefaultInitExpr>(S)) {
+ else if (const auto *DefE = dyn_cast<CXXDefaultInitExpr>(S))
ArgE = DefE->getExpr();
- HasRewrittenInit = DefE->hasRewrittenInit();
- } else
+ else
llvm_unreachable("unknown constant wrapper kind");
- if (HasRewrittenInit) {
- for (auto *N : PreVisit) {
- ProgramStateRef state = N->getState();
- const LocationContext *LCtx = N->getLocationContext();
- state = state->BindExpr(S, LCtx, state->getSVal(ArgE, LCtx));
- Bldr2.generateNode(S, N, state);
- }
- } else {
- // If it's not rewritten, the contents of these expressions are not
- // actually part of the current function, so we fall back to constant
- // evaluation.
- bool IsTemporary = false;
- if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(ArgE)) {
- ArgE = MTE->getSubExpr();
- IsTemporary = true;
- }
-
- std::optional<SVal> ConstantVal = svalBuilder.getConstantVal(ArgE);
- const LocationContext *LCtx = Pred->getLocationContext();
- for (auto *I : PreVisit) {
- ProgramStateRef State = I->getState();
- State = State->BindExpr(S, LCtx, ConstantVal.value_or(UnknownVal()));
- if (IsTemporary)
- State = createTemporaryRegionIfNeeded(State, LCtx, cast<Expr>(S),
- cast<Expr>(S));
+ bool IsTemporary = false;
+ if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(ArgE)) {
+ ArgE = MTE->getSubExpr();
+ IsTemporary = true;
+ }
- Bldr2.generateNode(S, I, State);
- }
+ std::optional<SVal> ConstantVal = svalBuilder.getConstantVal(ArgE);
+ if (!ConstantVal)
+ ConstantVal = UnknownVal();
+
+ const LocationContext *LCtx = Pred->getLocationContext();
+ for (const auto I : PreVisit) {
+ ProgramStateRef State = I->getState();
+ State = State->BindExpr(S, LCtx, *ConstantVal);
+ if (IsTemporary)
+ State = createTemporaryRegionIfNeeded(State, LCtx,
+ cast<Expr>(S),
+ cast<Expr>(S));
+ Bldr2.generateNode(S, I, State);
}
getCheckerManager().runCheckersForPostStmt(Dst, Tmp, S, *this);
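Both wrapper kinds handled here name an initializer that is written outside the current function body, which is why the engine folds it with getConstantVal() instead of modeling it; a sketch of the two forms (names illustrative):

    int pick();
    void g(int n = 2 * 21);     // each call g() holds a CXXDefaultArgExpr
    struct W {
      int x = 42;               // W{} evaluates this via CXXDefaultInitExpr
    };
    // In either case the engine binds the wrapper expression to the folded
    // constant when getConstantVal() succeeds, and to UnknownVal() otherwise
    // (e.g. if the initializer had called pick()).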
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
index 66a2f6e..0cab17a 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
@@ -42,7 +42,7 @@ DependencyScanningWorkerFilesystem::readFile(StringRef Filename) {
}
bool DependencyScanningWorkerFilesystem::ensureDirectiveTokensArePopulated(
- EntryRef Ref, const LangOptions &LangOpts) {
+ EntryRef Ref) {
auto &Entry = Ref.Entry;
if (Entry.isError() || Entry.isDirectory())
@@ -66,7 +66,7 @@ bool DependencyScanningWorkerFilesystem::ensureDirectiveTokensArePopulated(
// dependencies.
if (scanSourceForDependencyDirectives(Contents->Original->getBuffer(),
Contents->DepDirectiveTokens,
- Directives, LangOpts)) {
+ Directives)) {
Contents->DepDirectiveTokens.clear();
// FIXME: Propagate the diagnostic if desired by the client.
Contents->DepDirectives.store(new std::optional<DependencyDirectivesTy>());
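With the language options gone, callers only supply the buffer and the two output vectors; a sketch of the updated call shape, with Buffer standing in for whatever StringRef the caller holds:

    SmallVector<dependency_directives_scan::Token> Tokens;
    SmallVector<dependency_directives_scan::Directive> Directives;
    if (scanSourceForDependencyDirectives(Buffer, Tokens, Directives)) {
      // Scanning failed; treat the file as if it had no scannable directives.
      Tokens.clear();
    }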
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
index 07e1960d..0f82f22 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
@@ -364,12 +364,11 @@ public:
// Use the dependency scanning optimized file system if requested to do so.
if (DepFS)
ScanInstance.getPreprocessorOpts().DependencyDirectivesForFile =
- [LocalDepFS = DepFS,
- &LangOpts = ScanInstance.getLangOpts()](FileEntryRef File)
+ [LocalDepFS = DepFS](FileEntryRef File)
-> std::optional<ArrayRef<dependency_directives_scan::Directive>> {
if (llvm::ErrorOr<EntryRef> Entry =
LocalDepFS->getOrCreateFileSystemEntry(File.getName()))
- if (LocalDepFS->ensureDirectiveTokensArePopulated(*Entry, LangOpts))
+ if (LocalDepFS->ensureDirectiveTokensArePopulated(*Entry))
return Entry->getDirectiveTokens();
return std::nullopt;
};
diff --git a/clang/test/AST/Interp/arrays.cpp b/clang/test/AST/Interp/arrays.cpp
index dd5064d..6f6fca8 100644
--- a/clang/test/AST/Interp/arrays.cpp
+++ b/clang/test/AST/Interp/arrays.cpp
@@ -609,3 +609,17 @@ namespace ArrayMemberAccess {
bool cond = a->x;
}
}
+
+namespace OnePastEndSub {
+ struct A {};
+ constexpr A a[3][3];
+ constexpr int diff2 = &a[1][3] - &a[1][0]; /// Used to crash.
+}
+
+static int same_entity_2[3];
+constexpr int *get2() {
+ // This is a redeclaration of the same entity, even though it doesn't
+ // inherit the type of the prior declaration.
+ extern int same_entity_2[];
+ return same_entity_2;
+}
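The OnePastEndSub case subtracts from a one-past-the-end pointer, which is well-defined so long as only arithmetic (not a dereference) is performed; the result is simply the row length:

    struct A {};
    constexpr A a[3][3] = {};
    // &a[1][3] points one past the end of row a[1]; no A object lives there,
    // but the subtraction is still a constant expression.
    static_assert(&a[1][3] - &a[1][0] == 3, "");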
diff --git a/clang/test/AST/Interp/cxx23.cpp b/clang/test/AST/Interp/cxx23.cpp
index c91d52c..1efd784 100644
--- a/clang/test/AST/Interp/cxx23.cpp
+++ b/clang/test/AST/Interp/cxx23.cpp
@@ -178,3 +178,25 @@ namespace ExplicitLambdaThis {
};
static_assert(f());
}
+
+namespace std {
+ struct strong_ordering {
+ int n;
+ constexpr operator int() const { return n; }
+ static const strong_ordering less, equal, greater;
+ };
+ constexpr strong_ordering strong_ordering::less = {-1};
+ constexpr strong_ordering strong_ordering::equal = {0};
+ constexpr strong_ordering strong_ordering::greater = {1};
+}
+
+namespace UndefinedThreeWay {
+ struct A {
+ friend constexpr std::strong_ordering operator<=>(const A&, const A&) = default; // all-note {{declared here}}
+ };
+
+ constexpr std::strong_ordering operator<=>(const A&, const A&) noexcept;
+ constexpr std::strong_ordering (*test_a_threeway)(const A&, const A&) = &operator<=>;
+ static_assert(!(*test_a_threeway)(A(), A())); // all-error {{static assertion expression is not an integral constant expression}} \
+ // all-note {{undefined function 'operator<=>' cannot be used in a constant expression}}
+}
diff --git a/clang/test/AST/Interp/eval-order.cpp b/clang/test/AST/Interp/eval-order.cpp
index aaf2b74..7a7ce6a 100644
--- a/clang/test/AST/Interp/eval-order.cpp
+++ b/clang/test/AST/Interp/eval-order.cpp
@@ -71,8 +71,8 @@ namespace EvalOrder {
// Rules 1 and 2 have no effect ('b' is not an expression).
// Rule 3: a->*b
- // SEQ(A(ud).*B(&UserDefined::n)); FIXME
- // SEQ(A(&ud)->*B(&UserDefined::n)); FIXME
+ SEQ(A(ud).*B(&UserDefined::n));
+ SEQ(A(&ud)->*B(&UserDefined::n));
// Rule 4: a(b1, b2, b3)
SEQ(A(f)(B(1), B(2), B(3))); // expected-error {{not an integral constant expression}} FIXME \
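Rule 3 is the C++17 sequencing guarantee for the pointer-to-member operators: in a.*b and a->*b, the left operand is sequenced before the right one. A standalone illustration outside the SEQ harness (function names are illustrative):

    #include <cstdio>
    struct S { int n = 0; };
    S &lhs(S &s) { std::puts("lhs"); return s; }
    int S::*rhs() { std::puts("rhs"); return &S::n; }
    int main() {
      S s;
      lhs(s).*rhs() = 1; // C++17 guarantees "lhs" prints before "rhs"
    }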
diff --git a/clang/test/AST/Interp/literals.cpp b/clang/test/AST/Interp/literals.cpp
index c160be0..5a29013 100644
--- a/clang/test/AST/Interp/literals.cpp
+++ b/clang/test/AST/Interp/literals.cpp
@@ -66,7 +66,12 @@ namespace ScalarTypes {
First = 0,
};
static_assert(getScalar<E>() == First, "");
- /// FIXME: Member pointers.
+
+ struct S {
+ int v;
+ };
+ constexpr int S::* MemberPtr = &S::v;
+ static_assert(getScalar<decltype(MemberPtr)>() == nullptr, "");
#if __cplusplus >= 201402L
constexpr void Void(int n) {
@@ -1204,7 +1209,7 @@ namespace incdecbool {
constexpr int externvar1() { // both-error {{never produces a constant expression}}
extern char arr[]; // ref-note {{declared here}}
return arr[0]; // ref-note {{read of non-constexpr variable 'arr'}} \
- // expected-note {{array-to-pointer decay of array member without known bound is not supported}}
+ // expected-note {{indexing of array without known bound}}
}
#endif
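The new getScalar<> case leans on the fact that a value-initialized pointer-to-member is the null member pointer; a minimal standalone version of the same property:

    struct S { int v; };
    constexpr int S::*null_mp{};           // value-initialized ...
    static_assert(null_mp == nullptr, ""); // ... is the null member pointer
    static_assert(&S::v != nullptr, "");   // a real member is never null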
diff --git a/clang/test/AST/Interp/memberpointers.cpp b/clang/test/AST/Interp/memberpointers.cpp
new file mode 100644
index 0000000..54d73fe
--- /dev/null
+++ b/clang/test/AST/Interp/memberpointers.cpp
@@ -0,0 +1,197 @@
+// RUN: %clang_cc1 -std=c++14 -fexperimental-new-constant-interpreter -verify=expected,both %s
+// RUN: %clang_cc1 -std=c++14 -verify=ref,both %s
+
+namespace MemberPointers {
+ struct A {
+ constexpr A(int n) : n(n) {}
+ int n;
+ constexpr int f() const { return n + 3; }
+ };
+
+ constexpr A a(7);
+ static_assert(A(5).*&A::n == 5, "");
+ static_assert((&a)->*&A::n == 7, "");
+ static_assert((A(8).*&A::f)() == 11, "");
+ static_assert(((&a)->*&A::f)() == 10, "");
+
+ struct B : A {
+ constexpr B(int n, int m) : A(n), m(m) {}
+ int m;
+ constexpr int g() const { return n + m + 1; }
+ };
+ constexpr B b(9, 13);
+ static_assert(B(4, 11).*&A::n == 4, "");
+ static_assert(B(4, 11).*&B::m == 11, "");
+ static_assert(B(4, 11).m == 11, "");
+ static_assert(B(4, 11).*(int(A::*))&B::m == 11, "");
+ static_assert(B(4, 11).*&B::m == 11, "");
+ static_assert((&b)->*&A::n == 9, "");
+ static_assert((&b)->*&B::m == 13, "");
+ static_assert((&b)->*(int(A::*))&B::m == 13, "");
+ static_assert((B(4, 11).*&A::f)() == 7, "");
+ static_assert((B(4, 11).*&B::g)() == 16, "");
+
+ static_assert((B(4, 11).*(int(A::*)() const)&B::g)() == 16, "");
+
+ static_assert(((&b)->*&A::f)() == 12, "");
+ static_assert(((&b)->*&B::g)() == 23, "");
+ static_assert(((&b)->*(int(A::*)()const)&B::g)() == 23, "");
+
+
+ struct S {
+ constexpr S(int m, int n, int (S::*pf)() const, int S::*pn) :
+ m(m), n(n), pf(pf), pn(pn) {}
+ constexpr S() : m(), n(), pf(&S::f), pn(&S::n) {}
+
+ constexpr int f() const { return this->*pn; }
+ virtual int g() const;
+
+ int m, n;
+ int (S::*pf)() const;
+ int S::*pn;
+ };
+
+ constexpr int S::*pm = &S::m;
+ constexpr int S::*pn = &S::n;
+
+ constexpr int (S::*pf)() const = &S::f;
+ constexpr int (S::*pg)() const = &S::g;
+
+ constexpr S s(2, 5, &S::f, &S::m);
+
+ static_assert((s.*&S::f)() == 2, "");
+ static_assert((s.*s.pf)() == 2, "");
+
+ static_assert(pf == &S::f, "");
+
+ static_assert(pf == s.*&S::pf, "");
+
+ static_assert(pm == &S::m, "");
+ static_assert(pm != pn, "");
+ static_assert(s.pn != pn, "");
+ static_assert(s.pn == pm, "");
+ static_assert(pg != nullptr, "");
+ static_assert(pf != nullptr, "");
+ static_assert((int S::*)nullptr == nullptr, "");
+ static_assert(pg == pg, ""); // both-error {{constant expression}} \
+ // both-note {{comparison of pointer to virtual member function 'g' has unspecified value}}
+ static_assert(pf != pg, ""); // both-error {{constant expression}} \
+ // both-note {{comparison of pointer to virtual member function 'g' has unspecified value}}
+
+ template<int n> struct T : T<n-1> { const int X = n;};
+ template<> struct T<0> { int n; char k;};
+ template<> struct T<30> : T<29> { int m; };
+
+ T<17> t17;
+ T<30> t30;
+
+ constexpr int (T<15>::*deepm) = (int(T<10>::*))&T<30>::m;
+ constexpr int (T<10>::*deepn) = &T<0>::n;
+ constexpr char (T<10>::*deepk) = &T<0>::k;
+
+ static_assert(&(t17.*deepn) == &t17.n, "");
+ static_assert(&(t17.*deepk) == &t17.k, "");
+ static_assert(deepn == &T<2>::n, "");
+
+ constexpr int *pgood = &(t30.*deepm);
+ constexpr int *pbad = &(t17.*deepm); // both-error {{constant expression}}
+ static_assert(&(t30.*deepm) == &t30.m, "");
+
+ static_assert(deepm == &T<50>::m, "");
+ static_assert(deepm != deepn, "");
+
+ constexpr T<5> *p17_5 = &t17;
+ constexpr T<13> *p17_13 = (T<13>*)p17_5;
+ constexpr T<23> *p17_23 = (T<23>*)p17_13; // both-error {{constant expression}} \
+ // both-note {{cannot cast object of dynamic type 'T<17>' to type 'T<23>'}}
+ constexpr T<18> *p17_18 = (T<18>*)p17_13; // both-error {{constant expression}} \
+ // both-note {{cannot cast object of dynamic type 'T<17>' to type 'T<18>'}}
+ static_assert(&(p17_5->*(int(T<0>::*))deepn) == &t17.n, "");
+ static_assert(&(p17_5->*(int(T<0>::*))deepn), "");
+
+
+ static_assert(&(p17_13->*deepn) == &t17.n, "");
+ constexpr int *pbad2 = &(p17_13->*(int(T<9>::*))deepm); // both-error {{constant expression}}
+
+ constexpr T<5> *p30_5 = &t30;
+ constexpr T<23> *p30_23 = (T<23>*)p30_5;
+ constexpr T<13> *p30_13 = p30_23;
+ static_assert(&(p30_13->*deepn) == &t30.n, "");
+ static_assert(&(p30_23->*deepn) == &t30.n, "");
+ static_assert(&(p30_5->*(int(T<3>::*))deepn) == &t30.n, "");
+
+ static_assert(&(p30_5->*(int(T<2>::*))deepm) == &t30.m, "");
+ static_assert(&(((T<17>*)p30_13)->*deepm) == &t30.m, "");
+ static_assert(&(p30_23->*deepm) == &t30.m, "");
+
+
+ /// Added tests not from constant-expression-cxx11.cpp
+ static_assert(pm, "");
+ static_assert(!((int S::*)nullptr), "");
+ constexpr int S::*pk = nullptr;
+ static_assert(!pk, "");
+}
+
+namespace test3 {
+ struct nsCSSRect {
+ };
+ static int nsCSSRect::* sides;
+ nsCSSRect dimenX;
+ void ParseBoxCornerRadii(int y) {
+ switch (y) {
+ }
+ int& x = dimenX.*sides;
+ }
+}
+
+void foo() {
+ class X;
+ void (X::*d) ();
+  d = nullptr; /// This used to crash in the constant interpreter.
+}
+
+namespace {
+ struct A { int n; };
+ struct B { int n; };
+ struct C : A, B {};
+ struct D { double d; C c; };
+ const int &&u = static_cast<B&&>(0, ((D&&)D{}).*&D::c).n; // both-warning {{left operand of comma operator has no effect}}
+}
+
+/// From SemaTemplate/instantiate-member-pointers.cpp
+namespace {
+ struct Y {
+ int x;
+ };
+
+ template<typename T, typename Class, T Class::*Ptr>
+ struct X3 {
+ X3<T, Class, Ptr> &operator=(const T& value) {
+ return *this;
+ }
+ };
+
+ typedef int Y::*IntMember;
+ template<IntMember Member>
+ struct X4 {
+ X3<int, Y, Member> member;
+ int &getMember(Y& y) { return y.*Member; }
+ };
+
+ int &get_X4(X4<&Y::x> x4, Y& y) {
+ return x4.getMember(y);
+ }
+}
+
+/// From test/CXX/basic/basic.def.odr/p2.cpp
+namespace {
+ void use(int);
+ struct S { int x; int f() const; };
+ constexpr S *ps = nullptr;
+ S *const &psr = ps;
+
+ void test() {
+ use(ps->*&S::x);
+ use(psr->*&S::x);
+ }
+}
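Several of the casts in the file above, e.g. (int(A::*))&B::m, rely on pointer-to-member conversions running opposite to object-pointer conversions; a sketch of the direction rules, with illustrative names:

    struct A {};
    struct B : A { int m; };
    // Implicit: pointer to member of *base* converts to pointer to member of
    // *derived* (the reverse of object pointers).
    constexpr int A::*pa = nullptr;
    constexpr int B::*pb = pa; // OK
    // The derived-to-base direction needs a cast, and applying the result is
    // only valid on objects that actually contain a B subobject.
    constexpr int A::*pm = static_cast<int A::*>(&B::m);
    // B b{}; b.*pm accesses b.m; using pm on a plain A is undefined behavior.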
diff --git a/clang/test/AST/ast-dump-default-init-json.cpp b/clang/test/AST/ast-dump-default-init-json.cpp
index f4949a9..1058b4e 100644
--- a/clang/test/AST/ast-dump-default-init-json.cpp
+++ b/clang/test/AST/ast-dump-default-init-json.cpp
@@ -789,10 +789,10 @@ void test() {
// CHECK-NEXT: "valueCategory": "lvalue",
// CHECK-NEXT: "extendingDecl": {
// CHECK-NEXT: "id": "0x{{.*}}",
-// CHECK-NEXT: "kind": "VarDecl",
-// CHECK-NEXT: "name": "b",
+// CHECK-NEXT: "kind": "FieldDecl",
+// CHECK-NEXT: "name": "a",
// CHECK-NEXT: "type": {
-// CHECK-NEXT: "qualType": "B"
+// CHECK-NEXT: "qualType": "const A &"
// CHECK-NEXT: }
// CHECK-NEXT: },
// CHECK-NEXT: "storageDuration": "automatic",
diff --git a/clang/test/AST/ast-dump-default-init.cpp b/clang/test/AST/ast-dump-default-init.cpp
index 26864fb..15b29f0 100644
--- a/clang/test/AST/ast-dump-default-init.cpp
+++ b/clang/test/AST/ast-dump-default-init.cpp
@@ -13,7 +13,7 @@ void test() {
}
// CHECK: -CXXDefaultInitExpr 0x{{[^ ]*}} <{{.*}}> 'const A' lvalue has rewritten init
// CHECK-NEXT: `-ExprWithCleanups 0x{{[^ ]*}} <{{.*}}> 'const A' lvalue
-// CHECK-NEXT: `-MaterializeTemporaryExpr 0x{{[^ ]*}} <{{.*}}> 'const A' lvalue extended by Var 0x{{[^ ]*}} 'b' 'B'
+// CHECK-NEXT: `-MaterializeTemporaryExpr 0x{{[^ ]*}} <{{.*}}> 'const A' lvalue extended by Field 0x{{[^ ]*}} 'a' 'const A &'
// CHECK-NEXT: `-ImplicitCastExpr 0x{{[^ ]*}} <{{.*}}> 'const A' <NoOp>
// CHECK-NEXT: `-CXXFunctionalCastExpr 0x{{[^ ]*}} <{{.*}}> 'A' functional cast to A <NoOp>
// CHECK-NEXT: `-InitListExpr 0x{{[^ ]*}} <{{.*}}> 'A'
diff --git a/clang/test/AST/ast-print-openacc-loop-construct.cpp b/clang/test/AST/ast-print-openacc-loop-construct.cpp
index 519825b..cde302a 100644
--- a/clang/test/AST/ast-print-openacc-loop-construct.cpp
+++ b/clang/test/AST/ast-print-openacc-loop-construct.cpp
@@ -48,4 +48,13 @@ void foo() {
// CHECK-NEXT: ;
#pragma acc loop auto
for(;;);
+
+ int i;
+ float array[5];
+
+// CHECK: #pragma acc loop private(i, array[1], array, array[1:2])
+// CHECK-NEXT: for (;;)
+// CHECK-NEXT: ;
+#pragma acc loop private(i, array[1], array, array[1:2])
+ for(;;);
}
diff --git a/clang/test/Analysis/cxx-uninitialized-object.cpp b/clang/test/Analysis/cxx-uninitialized-object.cpp
index aee0dae..e3fa8ae 100644
--- a/clang/test/Analysis/cxx-uninitialized-object.cpp
+++ b/clang/test/Analysis/cxx-uninitialized-object.cpp
@@ -1114,27 +1114,27 @@ void fCXX11MemberInitTest1() {
CXX11MemberInitTest1();
}
-#ifdef PEDANTIC
struct CXX11MemberInitTest2 {
struct RecordType {
- int a; // expected-note {{uninitialized field 'this->a'}}
- int b; // expected-note {{uninitialized field 'this->b'}}
+ // TODO: we'd expect the note: {{uninitialized field 'this->rec.a'}}
+ int a; // no-note
+ // TODO: we'd expect the note: {{uninitialized field 'this->rec.b'}}
+ int b; // no-note
RecordType(int) {}
};
- RecordType rec = RecordType(int()); // expected-warning {{2 uninitialized fields}}
+ RecordType rec = RecordType(int());
int dontGetFilteredByNonPedanticMode = 0;
CXX11MemberInitTest2() {}
};
void fCXX11MemberInitTest2() {
+  // TODO: we'd expect the warning: {{2 uninitialized fields}}
CXX11MemberInitTest2(); // no-warning
}
-#endif // PEDANTIC
-
//===----------------------------------------------------------------------===//
// "Esoteric" primitive type tests.
//===----------------------------------------------------------------------===//
diff --git a/clang/test/Analysis/lifetime-extended-regions.cpp b/clang/test/Analysis/lifetime-extended-regions.cpp
index 524f4e0..4e98bd4 100644
--- a/clang/test/Analysis/lifetime-extended-regions.cpp
+++ b/clang/test/Analysis/lifetime-extended-regions.cpp
@@ -120,11 +120,11 @@ void aggregateWithReferences() {
clang_analyzer_dump(viaReference); // expected-warning-re {{&lifetime_extended_object{RefAggregate, viaReference, S{{[0-9]+}}} }}
clang_analyzer_dump(viaReference.rx); // expected-warning-re {{&lifetime_extended_object{int, viaReference, S{{[0-9]+}}} }}
clang_analyzer_dump(viaReference.ry); // expected-warning-re {{&lifetime_extended_object{Composite, viaReference, S{{[0-9]+}}} }}
-
- // The lifetime lifetime of object bound to reference members of aggregates,
- // that are created from default member initializer was extended.
- RefAggregate defaultInitExtended{i};
- clang_analyzer_dump(defaultInitExtended.ry); // expected-warning-re {{&lifetime_extended_object{Composite, defaultInitExtended, S{{[0-9]+}}} }}
+
+  // clang does not currently implement extending the lifetime of objects bound to reference members of aggregates
+  // that are created from a default member initializer (see `warn_unsupported_lifetime_extension` from `-Wdangling`)
+ RefAggregate defaultInitExtended{i}; // clang-bug does not extend `Composite`
+ clang_analyzer_dump(defaultInitExtended.ry); // expected-warning {{Unknown }}
}
void lambda() {
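The behavior pinned down here (and in the cwg1696/cwg1815 updates below) is that clang does not lifetime-extend a temporary bound to a reference member through a default member initializer during aggregate initialization; a sketch of the dangling pattern, mirroring the test's types:

    struct Composite { int x; };
    struct RefAggregate {
      int &rx;
      const Composite &ry = Composite{1}; // default member initializer
    };
    void use() {
      int i = 0;
      RefAggregate r{i}; // Composite{1} dies at the end of this
                         // full-expression, so r.ry dangles; clang warns via
                         // -Wdangling (warn_unsupported_lifetime_extension).
    }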
diff --git a/clang/test/CXX/drs/cwg16xx.cpp b/clang/test/CXX/drs/cwg16xx.cpp
index 82ef871..cf6b45c 100644
--- a/clang/test/CXX/drs/cwg16xx.cpp
+++ b/clang/test/CXX/drs/cwg16xx.cpp
@@ -483,6 +483,8 @@ namespace cwg1696 { // cwg1696: 7
const A &a = A(); // #cwg1696-D1-a
};
D1 d1 = {}; // #cwg1696-d1
+ // since-cxx14-warning@-1 {{lifetime extension of temporary created by aggregate initialization using a default member initializer is not yet supported; lifetime of temporary will end at the end of the full-expression}}
+ // since-cxx14-note@#cwg1696-D1-a {{initializing field 'a' with default member initializer}}
struct D2 {
const A &a = A(); // #cwg1696-D2-a
diff --git a/clang/test/CXX/drs/cwg18xx.cpp b/clang/test/CXX/drs/cwg18xx.cpp
index 054ce5a..323e56f 100644
--- a/clang/test/CXX/drs/cwg18xx.cpp
+++ b/clang/test/CXX/drs/cwg18xx.cpp
@@ -206,28 +206,19 @@ namespace cwg1814 { // cwg1814: yes
#endif
}
-namespace cwg1815 { // cwg1815: 19
+namespace cwg1815 { // cwg1815: no
#if __cplusplus >= 201402L
- struct A { int &&r = 0; };
+ // FIXME: needs codegen test
+ struct A { int &&r = 0; }; // #cwg1815-A
A a = {};
+ // since-cxx14-warning@-1 {{lifetime extension of temporary created by aggregate initialization using a default member initializer is not yet supported; lifetime of temporary will end at the end of the full-expression}} FIXME
+ // since-cxx14-note@#cwg1815-A {{initializing field 'r' with default member initializer}}
struct B { int &&r = 0; }; // #cwg1815-B
// since-cxx14-error@-1 {{reference member 'r' binds to a temporary object whose lifetime would be shorter than the lifetime of the constructed object}}
// since-cxx14-note@#cwg1815-B {{initializing field 'r' with default member initializer}}
// since-cxx14-note@#cwg1815-b {{in implicit default constructor for 'cwg1815::B' first required here}}
B b; // #cwg1815-b
-
-#if __cplusplus >= 201703L
- struct C { const int &r = 0; };
- constexpr C c = {}; // OK, since cwg1815
- static_assert(c.r == 0);
-
- constexpr int f() {
- A a = {}; // OK, since cwg1815
- return a.r;
- }
- static_assert(f() == 0);
-#endif
#endif
}
diff --git a/clang/test/CXX/special/class.temporary/p6.cpp b/clang/test/CXX/special/class.temporary/p6.cpp
index a6d2adf..5554363 100644
--- a/clang/test/CXX/special/class.temporary/p6.cpp
+++ b/clang/test/CXX/special/class.temporary/p6.cpp
@@ -269,40 +269,6 @@ void init_capture_init_list() {
// CHECK: }
}
-void check_dr1815() { // dr1815: yes
-#if __cplusplus >= 201402L
-
- struct A {
- int &&r = 0;
- ~A() {}
- };
-
- struct B {
- A &&a = A{};
- ~B() {}
- };
- B a = {};
-
- // CHECK: call {{.*}}block_scope_begin_function
- extern void block_scope_begin_function();
- extern void block_scope_end_function();
- block_scope_begin_function();
- {
- // CHECK: call void @_ZZ12check_dr1815vEN1BD1Ev
- // CHECK: call void @_ZZ12check_dr1815vEN1AD1Ev
- B b = {};
- }
- // CHECK: call {{.*}}block_scope_end_function
- block_scope_end_function();
-
- // CHECK: call {{.*}}some_other_function
- extern void some_other_function();
- some_other_function();
- // CHECK: call void @_ZZ12check_dr1815vEN1BD1Ev
- // CHECK: call void @_ZZ12check_dr1815vEN1AD1Ev
-#endif
-}
-
namespace P2718R0 {
namespace basic {
template <typename E> using T2 = std::list<E>;
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcpopv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcpopv.c
index 13748be..b87b225 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcpopv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcpopv.c
@@ -16,399 +16,399 @@
#include <riscv_vector.h>
-// CHECK-LABEL: @test_vcpopv_v_u8mf8(
+// CHECK-LABEL: @test_vcpop_v_u8mf8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcpopv.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vuint8mf8_t test_vcpopv_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf8(vs2, vl);
+vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf8(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf4(
+// CHECK-LABEL: @test_vcpop_v_u8mf4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcpopv.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vuint8mf4_t test_vcpopv_v_u8mf4(vuint8mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf4(vs2, vl);
+vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf4(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf2(
+// CHECK-LABEL: @test_vcpop_v_u8mf2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcpopv.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vuint8mf2_t test_vcpopv_v_u8mf2(vuint8mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf2(vs2, vl);
+vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf2(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m1(
+// CHECK-LABEL: @test_vcpop_v_u8m1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcpopv.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vuint8m1_t test_vcpopv_v_u8m1(vuint8m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m1(vs2, vl);
+vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m1(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m2(
+// CHECK-LABEL: @test_vcpop_v_u8m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcpopv.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vuint8m2_t test_vcpopv_v_u8m2(vuint8m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m2(vs2, vl);
+vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m2(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m4(
+// CHECK-LABEL: @test_vcpop_v_u8m4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcpopv.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vuint8m4_t test_vcpopv_v_u8m4(vuint8m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m4(vs2, vl);
+vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m4(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m8(
+// CHECK-LABEL: @test_vcpop_v_u8m8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcpopv.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vuint8m8_t test_vcpopv_v_u8m8(vuint8m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m8(vs2, vl);
+vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m8(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf4(
+// CHECK-LABEL: @test_vcpop_v_u16mf4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcpopv.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vuint16mf4_t test_vcpopv_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16mf4(vs2, vl);
+vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16mf4(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf2(
+// CHECK-LABEL: @test_vcpop_v_u16mf2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcpopv.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vuint16mf2_t test_vcpopv_v_u16mf2(vuint16mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16mf2(vs2, vl);
+vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16mf2(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m1(
+// CHECK-LABEL: @test_vcpop_v_u16m1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcpopv.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vuint16m1_t test_vcpopv_v_u16m1(vuint16m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m1(vs2, vl);
+vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m1(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m2(
+// CHECK-LABEL: @test_vcpop_v_u16m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcpopv.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vuint16m2_t test_vcpopv_v_u16m2(vuint16m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m2(vs2, vl);
+vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m2(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m4(
+// CHECK-LABEL: @test_vcpop_v_u16m4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcpopv.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vuint16m4_t test_vcpopv_v_u16m4(vuint16m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m4(vs2, vl);
+vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m4(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m8(
+// CHECK-LABEL: @test_vcpop_v_u16m8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcpopv.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vuint16m8_t test_vcpopv_v_u16m8(vuint16m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m8(vs2, vl);
+vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m8(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32mf2(
+// CHECK-LABEL: @test_vcpop_v_u32mf2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcpopv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vuint32mf2_t test_vcpopv_v_u32mf2(vuint32mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32mf2(vs2, vl);
+vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32mf2(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m1(
+// CHECK-LABEL: @test_vcpop_v_u32m1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcpopv.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vuint32m1_t test_vcpopv_v_u32m1(vuint32m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m1(vs2, vl);
+vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m1(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m2(
+// CHECK-LABEL: @test_vcpop_v_u32m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcpopv.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vuint32m2_t test_vcpopv_v_u32m2(vuint32m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m2(vs2, vl);
+vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m2(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m4(
+// CHECK-LABEL: @test_vcpop_v_u32m4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcpopv.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vuint32m4_t test_vcpopv_v_u32m4(vuint32m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m4(vs2, vl);
+vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m4(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m8(
+// CHECK-LABEL: @test_vcpop_v_u32m8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcpopv.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vuint32m8_t test_vcpopv_v_u32m8(vuint32m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m8(vs2, vl);
+vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m8(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m1(
+// CHECK-LABEL: @test_vcpop_v_u64m1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcpopv.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
-vuint64m1_t test_vcpopv_v_u64m1(vuint64m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m1(vs2, vl);
+vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m1(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m2(
+// CHECK-LABEL: @test_vcpop_v_u64m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcpopv.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-vuint64m2_t test_vcpopv_v_u64m2(vuint64m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m2(vs2, vl);
+vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m2(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m4(
+// CHECK-LABEL: @test_vcpop_v_u64m4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcpopv.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
-vuint64m4_t test_vcpopv_v_u64m4(vuint64m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m4(vs2, vl);
+vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m4(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m8(
+// CHECK-LABEL: @test_vcpop_v_u64m8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcpopv.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-vuint64m8_t test_vcpopv_v_u64m8(vuint64m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m8(vs2, vl);
+vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m8(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf8_m(
+// CHECK-LABEL: @test_vcpop_v_u8mf8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcpopv.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vuint8mf8_t test_vcpopv_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf8_m(mask, vs2, vl);
+vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf8_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf4_m(
+// CHECK-LABEL: @test_vcpop_v_u8mf4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcpopv.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vuint8mf4_t test_vcpopv_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf4_m(mask, vs2, vl);
+vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf4_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf2_m(
+// CHECK-LABEL: @test_vcpop_v_u8mf2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcpopv.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vuint8mf2_t test_vcpopv_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf2_m(mask, vs2, vl);
+vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf2_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m1_m(
+// CHECK-LABEL: @test_vcpop_v_u8m1_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcpopv.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vuint8m1_t test_vcpopv_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m1_m(mask, vs2, vl);
+vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m1_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m2_m(
+// CHECK-LABEL: @test_vcpop_v_u8m2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcpopv.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vuint8m2_t test_vcpopv_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m2_m(mask, vs2, vl);
+vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m2_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m4_m(
+// CHECK-LABEL: @test_vcpop_v_u8m4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcpopv.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vuint8m4_t test_vcpopv_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m4_m(mask, vs2, vl);
+vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m4_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m8_m(
+// CHECK-LABEL: @test_vcpop_v_u8m8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcpopv.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vuint8m8_t test_vcpopv_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m8_m(mask, vs2, vl);
+vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m8_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf4_m(
+// CHECK-LABEL: @test_vcpop_v_u16mf4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcpopv.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vuint16mf4_t test_vcpopv_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16mf4_m(mask, vs2, vl);
+vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16mf4_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf2_m(
+// CHECK-LABEL: @test_vcpop_v_u16mf2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcpopv.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vuint16mf2_t test_vcpopv_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16mf2_m(mask, vs2, vl);
+vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16mf2_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m1_m(
+// CHECK-LABEL: @test_vcpop_v_u16m1_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcpopv.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vuint16m1_t test_vcpopv_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m1_m(mask, vs2, vl);
+vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m1_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m2_m(
+// CHECK-LABEL: @test_vcpop_v_u16m2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcpopv.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vuint16m2_t test_vcpopv_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m2_m(mask, vs2, vl);
+vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m2_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m4_m(
+// CHECK-LABEL: @test_vcpop_v_u16m4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcpopv.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vuint16m4_t test_vcpopv_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m4_m(mask, vs2, vl);
+vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m4_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m8_m(
+// CHECK-LABEL: @test_vcpop_v_u16m8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcpopv.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vuint16m8_t test_vcpopv_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m8_m(mask, vs2, vl);
+vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m8_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32mf2_m(
+// CHECK-LABEL: @test_vcpop_v_u32mf2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcpopv.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vuint32mf2_t test_vcpopv_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32mf2_m(mask, vs2, vl);
+vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32mf2_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m1_m(
+// CHECK-LABEL: @test_vcpop_v_u32m1_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcpopv.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vuint32m1_t test_vcpopv_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m1_m(mask, vs2, vl);
+vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m1_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m2_m(
+// CHECK-LABEL: @test_vcpop_v_u32m2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcpopv.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vuint32m2_t test_vcpopv_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m2_m(mask, vs2, vl);
+vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m2_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m4_m(
+// CHECK-LABEL: @test_vcpop_v_u32m4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcpopv.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vuint32m4_t test_vcpopv_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m4_m(mask, vs2, vl);
+vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m4_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m8_m(
+// CHECK-LABEL: @test_vcpop_v_u32m8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcpopv.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vuint32m8_t test_vcpopv_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m8_m(mask, vs2, vl);
+vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m8_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m1_m(
+// CHECK-LABEL: @test_vcpop_v_u64m1_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcpopv.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
-vuint64m1_t test_vcpopv_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m1_m(mask, vs2, vl);
+vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m1_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m2_m(
+// CHECK-LABEL: @test_vcpop_v_u64m2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcpopv.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-vuint64m2_t test_vcpopv_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m2_m(mask, vs2, vl);
+vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m2_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m4_m(
+// CHECK-LABEL: @test_vcpop_v_u64m4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcpopv.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
-vuint64m4_t test_vcpopv_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m4_m(mask, vs2, vl);
+vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m4_m(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m8_m(
+// CHECK-LABEL: @test_vcpop_v_u64m8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcpopv.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-vuint64m8_t test_vcpopv_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m8_m(mask, vs2, vl);
+vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m8_m(mask, vs2, vl);
}
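A minimal usage sketch of the renamed non-overloaded intrinsic (illustrative only: the helper name count_bits_per_byte and its strip-mining loop are not part of this patch, and the code assumes a toolchain with the Zvbb extension enabled, e.g. -march=rv64gcv_zvbb):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Per-element popcount of a byte buffer via vcpop.v (Zvbb).
   Strip-mining loop: vl shrinks to cover the final partial group. */
void count_bits_per_byte(const uint8_t *src, uint8_t *dst, size_t n) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = __riscv_vsetvl_e8m1(n);
    vuint8m1_t v = __riscv_vle8_v_u8m1(src, vl);
    vuint8m1_t c = __riscv_vcpop_v_u8m1(v, vl); /* renamed from __riscv_vcpopv_v_u8m1 */
    __riscv_vse8_v_u8m1(dst, c, vl);
  }
}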
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcpopv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcpopv.c
index adb0ac9..5625b19 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcpopv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcpopv.c
@@ -16,399 +16,399 @@
#include <riscv_vector.h>
-// CHECK-LABEL: @test_vcpopv_v_u8mf8(
+// CHECK-LABEL: @test_vcpop_v_u8mf8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcpopv.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vuint8mf8_t test_vcpopv_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf4(
+// CHECK-LABEL: @test_vcpop_v_u8mf4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcpopv.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vuint8mf4_t test_vcpopv_v_u8mf4(vuint8mf4_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf2(
+// CHECK-LABEL: @test_vcpop_v_u8mf2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcpopv.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vuint8mf2_t test_vcpopv_v_u8mf2(vuint8mf2_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m1(
+// CHECK-LABEL: @test_vcpop_v_u8m1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcpopv.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vuint8m1_t test_vcpopv_v_u8m1(vuint8m1_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m2(
+// CHECK-LABEL: @test_vcpop_v_u8m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcpopv.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vuint8m2_t test_vcpopv_v_u8m2(vuint8m2_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m4(
+// CHECK-LABEL: @test_vcpop_v_u8m4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcpopv.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vuint8m4_t test_vcpopv_v_u8m4(vuint8m4_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m8(
+// CHECK-LABEL: @test_vcpop_v_u8m8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcpopv.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vuint8m8_t test_vcpopv_v_u8m8(vuint8m8_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf4(
+// CHECK-LABEL: @test_vcpop_v_u16mf4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcpopv.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vuint16mf4_t test_vcpopv_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf2(
+// CHECK-LABEL: @test_vcpop_v_u16mf2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcpopv.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vuint16mf2_t test_vcpopv_v_u16mf2(vuint16mf2_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m1(
+// CHECK-LABEL: @test_vcpop_v_u16m1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcpopv.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vuint16m1_t test_vcpopv_v_u16m1(vuint16m1_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m2(
+// CHECK-LABEL: @test_vcpop_v_u16m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcpopv.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vuint16m2_t test_vcpopv_v_u16m2(vuint16m2_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m4(
+// CHECK-LABEL: @test_vcpop_v_u16m4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcpopv.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vuint16m4_t test_vcpopv_v_u16m4(vuint16m4_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m8(
+// CHECK-LABEL: @test_vcpop_v_u16m8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcpopv.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vuint16m8_t test_vcpopv_v_u16m8(vuint16m8_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32mf2(
+// CHECK-LABEL: @test_vcpop_v_u32mf2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcpopv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vuint32mf2_t test_vcpopv_v_u32mf2(vuint32mf2_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m1(
+// CHECK-LABEL: @test_vcpop_v_u32m1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcpopv.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vuint32m1_t test_vcpopv_v_u32m1(vuint32m1_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m2(
+// CHECK-LABEL: @test_vcpop_v_u32m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcpopv.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vuint32m2_t test_vcpopv_v_u32m2(vuint32m2_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m4(
+// CHECK-LABEL: @test_vcpop_v_u32m4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcpopv.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vuint32m4_t test_vcpopv_v_u32m4(vuint32m4_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m8(
+// CHECK-LABEL: @test_vcpop_v_u32m8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcpopv.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vuint32m8_t test_vcpopv_v_u32m8(vuint32m8_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m1(
+// CHECK-LABEL: @test_vcpop_v_u64m1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcpopv.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
-vuint64m1_t test_vcpopv_v_u64m1(vuint64m1_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m2(
+// CHECK-LABEL: @test_vcpop_v_u64m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcpopv.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-vuint64m2_t test_vcpopv_v_u64m2(vuint64m2_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m4(
+// CHECK-LABEL: @test_vcpop_v_u64m4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcpopv.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
-vuint64m4_t test_vcpopv_v_u64m4(vuint64m4_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m8(
+// CHECK-LABEL: @test_vcpop_v_u64m8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcpopv.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-vuint64m8_t test_vcpopv_v_u64m8(vuint64m8_t vs2, size_t vl) {
- return __riscv_vcpopv(vs2, vl);
+vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) {
+ return __riscv_vcpop(vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf8_m(
+// CHECK-LABEL: @test_vcpop_v_u8mf8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcpopv.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vuint8mf8_t test_vcpopv_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf4_m(
+// CHECK-LABEL: @test_vcpop_v_u8mf4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcpopv.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vuint8mf4_t test_vcpopv_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf2_m(
+// CHECK-LABEL: @test_vcpop_v_u8mf2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcpopv.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vuint8mf2_t test_vcpopv_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m1_m(
+// CHECK-LABEL: @test_vcpop_v_u8m1_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcpopv.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vuint8m1_t test_vcpopv_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m2_m(
+// CHECK-LABEL: @test_vcpop_v_u8m2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcpopv.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vuint8m2_t test_vcpopv_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m4_m(
+// CHECK-LABEL: @test_vcpop_v_u8m4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcpopv.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vuint8m4_t test_vcpopv_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m8_m(
+// CHECK-LABEL: @test_vcpop_v_u8m8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcpopv.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vuint8m8_t test_vcpopv_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf4_m(
+// CHECK-LABEL: @test_vcpop_v_u16mf4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcpopv.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vuint16mf4_t test_vcpopv_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf2_m(
+// CHECK-LABEL: @test_vcpop_v_u16mf2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcpopv.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vuint16mf2_t test_vcpopv_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m1_m(
+// CHECK-LABEL: @test_vcpop_v_u16m1_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcpopv.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vuint16m1_t test_vcpopv_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m2_m(
+// CHECK-LABEL: @test_vcpop_v_u16m2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcpopv.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vuint16m2_t test_vcpopv_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m4_m(
+// CHECK-LABEL: @test_vcpop_v_u16m4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcpopv.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vuint16m4_t test_vcpopv_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m8_m(
+// CHECK-LABEL: @test_vcpop_v_u16m8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcpopv.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vuint16m8_t test_vcpopv_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32mf2_m(
+// CHECK-LABEL: @test_vcpop_v_u32mf2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcpopv.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vuint32mf2_t test_vcpopv_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m1_m(
+// CHECK-LABEL: @test_vcpop_v_u32m1_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcpopv.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vuint32m1_t test_vcpopv_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m2_m(
+// CHECK-LABEL: @test_vcpop_v_u32m2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcpopv.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vuint32m2_t test_vcpopv_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m4_m(
+// CHECK-LABEL: @test_vcpop_v_u32m4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcpopv.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vuint32m4_t test_vcpopv_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m8_m(
+// CHECK-LABEL: @test_vcpop_v_u32m8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcpopv.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vuint32m8_t test_vcpopv_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m1_m(
+// CHECK-LABEL: @test_vcpop_v_u64m1_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcpopv.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
-vuint64m1_t test_vcpopv_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m2_m(
+// CHECK-LABEL: @test_vcpop_v_u64m2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcpopv.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-vuint64m2_t test_vcpopv_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m4_m(
+// CHECK-LABEL: @test_vcpop_v_u64m4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcpopv.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
-vuint64m4_t test_vcpopv_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m8_m(
+// CHECK-LABEL: @test_vcpop_v_u64m8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcpopv.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-vuint64m8_t test_vcpopv_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) {
- return __riscv_vcpopv(mask, vs2, vl);
+vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) {
+ return __riscv_vcpop(mask, vs2, vl);
}
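A hedged sketch of the overloaded spelling exercised by the file above (popcount_masked is an illustrative name, not from the patch): the single name __riscv_vcpop resolves on the argument types, and a leading mask argument selects the masked form, whose inactive lanes are agnostic in the non-policy API (the IR above passes poison as the merge operand with policy value 3).

#include <riscv_vector.h>
#include <stddef.h>

/* Overload resolution picks the u16m2 masked variant from the
   argument types; inactive lanes are agnostic (undefined) here. */
vuint16m2_t popcount_masked(vbool8_t mask, vuint16m2_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}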
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcpopv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcpopv.c
index 8a1f2e1..3a11033 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcpopv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcpopv.c
@@ -16,795 +16,795 @@
#include <riscv_vector.h>
-// CHECK-LABEL: @test_vcpopv_v_u8mf8_tu(
+// CHECK-LABEL: @test_vcpop_v_u8mf8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcpopv.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vuint8mf8_t test_vcpopv_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf8_tu(maskedoff, vs2, vl);
+vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf8_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf4_tu(
+// CHECK-LABEL: @test_vcpop_v_u8mf4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcpopv.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vuint8mf4_t test_vcpopv_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf4_tu(maskedoff, vs2, vl);
+vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf4_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf2_tu(
+// CHECK-LABEL: @test_vcpop_v_u8mf2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcpopv.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vuint8mf2_t test_vcpopv_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf2_tu(maskedoff, vs2, vl);
+vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf2_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m1_tu(
+// CHECK-LABEL: @test_vcpop_v_u8m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcpopv.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vuint8m1_t test_vcpopv_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m1_tu(maskedoff, vs2, vl);
+vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m1_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m2_tu(
+// CHECK-LABEL: @test_vcpop_v_u8m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcpopv.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vuint8m2_t test_vcpopv_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m2_tu(maskedoff, vs2, vl);
+vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m2_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m4_tu(
+// CHECK-LABEL: @test_vcpop_v_u8m4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcpopv.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vuint8m4_t test_vcpopv_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m4_tu(maskedoff, vs2, vl);
+vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m4_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m8_tu(
+// CHECK-LABEL: @test_vcpop_v_u8m8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcpopv.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vuint8m8_t test_vcpopv_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m8_tu(maskedoff, vs2, vl);
+vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m8_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf4_tu(
+// CHECK-LABEL: @test_vcpop_v_u16mf4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcpopv.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vuint16mf4_t test_vcpopv_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16mf4_tu(maskedoff, vs2, vl);
+vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16mf4_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf2_tu(
+// CHECK-LABEL: @test_vcpop_v_u16mf2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcpopv.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vuint16mf2_t test_vcpopv_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16mf2_tu(maskedoff, vs2, vl);
+vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16mf2_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m1_tu(
+// CHECK-LABEL: @test_vcpop_v_u16m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcpopv.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vuint16m1_t test_vcpopv_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m1_tu(maskedoff, vs2, vl);
+vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m1_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m2_tu(
+// CHECK-LABEL: @test_vcpop_v_u16m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcpopv.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vuint16m2_t test_vcpopv_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m2_tu(maskedoff, vs2, vl);
+vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m2_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m4_tu(
+// CHECK-LABEL: @test_vcpop_v_u16m4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcpopv.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vuint16m4_t test_vcpopv_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m4_tu(maskedoff, vs2, vl);
+vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m4_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m8_tu(
+// CHECK-LABEL: @test_vcpop_v_u16m8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcpopv.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vuint16m8_t test_vcpopv_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m8_tu(maskedoff, vs2, vl);
+vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m8_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32mf2_tu(
+// CHECK-LABEL: @test_vcpop_v_u32mf2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcpopv.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vuint32mf2_t test_vcpopv_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32mf2_tu(maskedoff, vs2, vl);
+vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32mf2_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m1_tu(
+// CHECK-LABEL: @test_vcpop_v_u32m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcpopv.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vuint32m1_t test_vcpopv_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m1_tu(maskedoff, vs2, vl);
+vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m1_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m2_tu(
+// CHECK-LABEL: @test_vcpop_v_u32m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcpopv.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vuint32m2_t test_vcpopv_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m2_tu(maskedoff, vs2, vl);
+vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m2_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m4_tu(
+// CHECK-LABEL: @test_vcpop_v_u32m4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcpopv.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vuint32m4_t test_vcpopv_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m4_tu(maskedoff, vs2, vl);
+vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m4_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m8_tu(
+// CHECK-LABEL: @test_vcpop_v_u32m8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcpopv.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vuint32m8_t test_vcpopv_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m8_tu(maskedoff, vs2, vl);
+vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m8_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m1_tu(
+// CHECK-LABEL: @test_vcpop_v_u64m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcpopv.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
-vuint64m1_t test_vcpopv_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m1_tu(maskedoff, vs2, vl);
+vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m1_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m2_tu(
+// CHECK-LABEL: @test_vcpop_v_u64m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcpopv.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-vuint64m2_t test_vcpopv_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m2_tu(maskedoff, vs2, vl);
+vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m2_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m4_tu(
+// CHECK-LABEL: @test_vcpop_v_u64m4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcpopv.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
-vuint64m4_t test_vcpopv_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m4_tu(maskedoff, vs2, vl);
+vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m4_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m8_tu(
+// CHECK-LABEL: @test_vcpop_v_u64m8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcpopv.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-vuint64m8_t test_vcpopv_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m8_tu(maskedoff, vs2, vl);
+vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m8_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf8_tum(
+// CHECK-LABEL: @test_vcpop_v_u8mf8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcpopv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vuint8mf8_t test_vcpopv_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf8_tum(mask, maskedoff, vs2, vl);
+vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf8_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf4_tum(
+// CHECK-LABEL: @test_vcpop_v_u8mf4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcpopv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vuint8mf4_t test_vcpopv_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf4_tum(mask, maskedoff, vs2, vl);
+vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf4_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf2_tum(
+// CHECK-LABEL: @test_vcpop_v_u8mf2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcpopv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vuint8mf2_t test_vcpopv_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf2_tum(mask, maskedoff, vs2, vl);
+vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf2_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m1_tum(
+// CHECK-LABEL: @test_vcpop_v_u8m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcpopv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vuint8m1_t test_vcpopv_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m1_tum(mask, maskedoff, vs2, vl);
+vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m1_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m2_tum(
+// CHECK-LABEL: @test_vcpop_v_u8m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcpopv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vuint8m2_t test_vcpopv_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m2_tum(mask, maskedoff, vs2, vl);
+vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m2_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m4_tum(
+// CHECK-LABEL: @test_vcpop_v_u8m4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcpopv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vuint8m4_t test_vcpopv_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m4_tum(mask, maskedoff, vs2, vl);
+vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m4_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m8_tum(
+// CHECK-LABEL: @test_vcpop_v_u8m8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcpopv.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vuint8m8_t test_vcpopv_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m8_tum(mask, maskedoff, vs2, vl);
+vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m8_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf4_tum(
+// CHECK-LABEL: @test_vcpop_v_u16mf4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcpopv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vuint16mf4_t test_vcpopv_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16mf4_tum(mask, maskedoff, vs2, vl);
+vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16mf4_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf2_tum(
+// CHECK-LABEL: @test_vcpop_v_u16mf2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcpopv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vuint16mf2_t test_vcpopv_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16mf2_tum(mask, maskedoff, vs2, vl);
+vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16mf2_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m1_tum(
+// CHECK-LABEL: @test_vcpop_v_u16m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcpopv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vuint16m1_t test_vcpopv_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m1_tum(mask, maskedoff, vs2, vl);
+vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m1_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m2_tum(
+// CHECK-LABEL: @test_vcpop_v_u16m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcpopv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vuint16m2_t test_vcpopv_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m2_tum(mask, maskedoff, vs2, vl);
+vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m2_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m4_tum(
+// CHECK-LABEL: @test_vcpop_v_u16m4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcpopv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vuint16m4_t test_vcpopv_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m4_tum(mask, maskedoff, vs2, vl);
+vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m4_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m8_tum(
+// CHECK-LABEL: @test_vcpop_v_u16m8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcpopv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vuint16m8_t test_vcpopv_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m8_tum(mask, maskedoff, vs2, vl);
+vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m8_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32mf2_tum(
+// CHECK-LABEL: @test_vcpop_v_u32mf2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcpopv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vuint32mf2_t test_vcpopv_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32mf2_tum(mask, maskedoff, vs2, vl);
+vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32mf2_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m1_tum(
+// CHECK-LABEL: @test_vcpop_v_u32m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcpopv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vuint32m1_t test_vcpopv_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m1_tum(mask, maskedoff, vs2, vl);
+vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m1_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m2_tum(
+// CHECK-LABEL: @test_vcpop_v_u32m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcpopv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vuint32m2_t test_vcpopv_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m2_tum(mask, maskedoff, vs2, vl);
+vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m2_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m4_tum(
+// CHECK-LABEL: @test_vcpop_v_u32m4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcpopv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vuint32m4_t test_vcpopv_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m4_tum(mask, maskedoff, vs2, vl);
+vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m4_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m8_tum(
+// CHECK-LABEL: @test_vcpop_v_u32m8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcpopv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vuint32m8_t test_vcpopv_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m8_tum(mask, maskedoff, vs2, vl);
+vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m8_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m1_tum(
+// CHECK-LABEL: @test_vcpop_v_u64m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcpopv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
-vuint64m1_t test_vcpopv_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m1_tum(mask, maskedoff, vs2, vl);
+vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m1_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m2_tum(
+// CHECK-LABEL: @test_vcpop_v_u64m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcpopv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-vuint64m2_t test_vcpopv_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m2_tum(mask, maskedoff, vs2, vl);
+vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m2_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m4_tum(
+// CHECK-LABEL: @test_vcpop_v_u64m4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcpopv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
-vuint64m4_t test_vcpopv_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m4_tum(mask, maskedoff, vs2, vl);
+vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m4_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m8_tum(
+// CHECK-LABEL: @test_vcpop_v_u64m8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcpopv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-vuint64m8_t test_vcpopv_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m8_tum(mask, maskedoff, vs2, vl);
+vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m8_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf8_tumu(
+// CHECK-LABEL: @test_vcpop_v_u8mf8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcpopv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vuint8mf8_t test_vcpopv_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf8_tumu(mask, maskedoff, vs2, vl);
+vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf8_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf4_tumu(
+// CHECK-LABEL: @test_vcpop_v_u8mf4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcpopv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vuint8mf4_t test_vcpopv_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf4_tumu(mask, maskedoff, vs2, vl);
+vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf4_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf2_tumu(
+// CHECK-LABEL: @test_vcpop_v_u8mf2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcpopv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vuint8mf2_t test_vcpopv_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf2_tumu(mask, maskedoff, vs2, vl);
+vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf2_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m1_tumu(
+// CHECK-LABEL: @test_vcpop_v_u8m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcpopv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vuint8m1_t test_vcpopv_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m1_tumu(mask, maskedoff, vs2, vl);
+vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m1_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m2_tumu(
+// CHECK-LABEL: @test_vcpop_v_u8m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcpopv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vuint8m2_t test_vcpopv_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m2_tumu(mask, maskedoff, vs2, vl);
+vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m2_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m4_tumu(
+// CHECK-LABEL: @test_vcpop_v_u8m4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcpopv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vuint8m4_t test_vcpopv_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m4_tumu(mask, maskedoff, vs2, vl);
+vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m4_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m8_tumu(
+// CHECK-LABEL: @test_vcpop_v_u8m8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcpopv.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vuint8m8_t test_vcpopv_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m8_tumu(mask, maskedoff, vs2, vl);
+vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m8_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf4_tumu(
+// CHECK-LABEL: @test_vcpop_v_u16mf4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcpopv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vuint16mf4_t test_vcpopv_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16mf4_tumu(mask, maskedoff, vs2, vl);
+vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16mf4_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf2_tumu(
+// CHECK-LABEL: @test_vcpop_v_u16mf2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcpopv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vuint16mf2_t test_vcpopv_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16mf2_tumu(mask, maskedoff, vs2, vl);
+vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16mf2_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m1_tumu(
+// CHECK-LABEL: @test_vcpop_v_u16m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcpopv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vuint16m1_t test_vcpopv_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m1_tumu(mask, maskedoff, vs2, vl);
+vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m1_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m2_tumu(
+// CHECK-LABEL: @test_vcpop_v_u16m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcpopv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vuint16m2_t test_vcpopv_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m2_tumu(mask, maskedoff, vs2, vl);
+vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m2_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m4_tumu(
+// CHECK-LABEL: @test_vcpop_v_u16m4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcpopv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vuint16m4_t test_vcpopv_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m4_tumu(mask, maskedoff, vs2, vl);
+vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m4_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m8_tumu(
+// CHECK-LABEL: @test_vcpop_v_u16m8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcpopv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vuint16m8_t test_vcpopv_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m8_tumu(mask, maskedoff, vs2, vl);
+vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m8_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32mf2_tumu(
+// CHECK-LABEL: @test_vcpop_v_u32mf2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcpopv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vuint32mf2_t test_vcpopv_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32mf2_tumu(mask, maskedoff, vs2, vl);
+vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32mf2_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m1_tumu(
+// CHECK-LABEL: @test_vcpop_v_u32m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcpopv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vuint32m1_t test_vcpopv_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m1_tumu(mask, maskedoff, vs2, vl);
+vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m1_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m2_tumu(
+// CHECK-LABEL: @test_vcpop_v_u32m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcpopv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vuint32m2_t test_vcpopv_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m2_tumu(mask, maskedoff, vs2, vl);
+vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m2_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m4_tumu(
+// CHECK-LABEL: @test_vcpop_v_u32m4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcpopv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vuint32m4_t test_vcpopv_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m4_tumu(mask, maskedoff, vs2, vl);
+vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m4_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m8_tumu(
+// CHECK-LABEL: @test_vcpop_v_u32m8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcpopv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vuint32m8_t test_vcpopv_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m8_tumu(mask, maskedoff, vs2, vl);
+vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m8_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m1_tumu(
+// CHECK-LABEL: @test_vcpop_v_u64m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcpopv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
-vuint64m1_t test_vcpopv_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m1_tumu(mask, maskedoff, vs2, vl);
+vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m1_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m2_tumu(
+// CHECK-LABEL: @test_vcpop_v_u64m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcpopv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-vuint64m2_t test_vcpopv_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m2_tumu(mask, maskedoff, vs2, vl);
+vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m2_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m4_tumu(
+// CHECK-LABEL: @test_vcpop_v_u64m4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcpopv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
-vuint64m4_t test_vcpopv_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m4_tumu(mask, maskedoff, vs2, vl);
+vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m4_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m8_tumu(
+// CHECK-LABEL: @test_vcpop_v_u64m8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcpopv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-vuint64m8_t test_vcpopv_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m8_tumu(mask, maskedoff, vs2, vl);
+vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m8_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf8_mu(
+// CHECK-LABEL: @test_vcpop_v_u8mf8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcpopv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vuint8mf8_t test_vcpopv_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf8_mu(mask, maskedoff, vs2, vl);
+vuint8mf8_t test_vcpop_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf8_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf4_mu(
+// CHECK-LABEL: @test_vcpop_v_u8mf4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcpopv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vuint8mf4_t test_vcpopv_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf4_mu(mask, maskedoff, vs2, vl);
+vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf4_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf2_mu(
+// CHECK-LABEL: @test_vcpop_v_u8mf2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcpopv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vuint8mf2_t test_vcpopv_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8mf2_mu(mask, maskedoff, vs2, vl);
+vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8mf2_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m1_mu(
+// CHECK-LABEL: @test_vcpop_v_u8m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcpopv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vuint8m1_t test_vcpopv_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m1_mu(mask, maskedoff, vs2, vl);
+vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m1_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m2_mu(
+// CHECK-LABEL: @test_vcpop_v_u8m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcpopv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vuint8m2_t test_vcpopv_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m2_mu(mask, maskedoff, vs2, vl);
+vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m2_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m4_mu(
+// CHECK-LABEL: @test_vcpop_v_u8m4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcpopv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vuint8m4_t test_vcpopv_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m4_mu(mask, maskedoff, vs2, vl);
+vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m4_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m8_mu(
+// CHECK-LABEL: @test_vcpop_v_u8m8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcpopv.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vuint8m8_t test_vcpopv_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u8m8_mu(mask, maskedoff, vs2, vl);
+vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u8m8_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf4_mu(
+// CHECK-LABEL: @test_vcpop_v_u16mf4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcpopv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vuint16mf4_t test_vcpopv_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16mf4_mu(mask, maskedoff, vs2, vl);
+vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16mf4_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf2_mu(
+// CHECK-LABEL: @test_vcpop_v_u16mf2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcpopv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vuint16mf2_t test_vcpopv_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16mf2_mu(mask, maskedoff, vs2, vl);
+vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16mf2_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m1_mu(
+// CHECK-LABEL: @test_vcpop_v_u16m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcpopv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vuint16m1_t test_vcpopv_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m1_mu(mask, maskedoff, vs2, vl);
+vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m1_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m2_mu(
+// CHECK-LABEL: @test_vcpop_v_u16m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcpopv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vuint16m2_t test_vcpopv_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m2_mu(mask, maskedoff, vs2, vl);
+vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m2_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m4_mu(
+// CHECK-LABEL: @test_vcpop_v_u16m4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcpopv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vuint16m4_t test_vcpopv_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m4_mu(mask, maskedoff, vs2, vl);
+vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m4_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m8_mu(
+// CHECK-LABEL: @test_vcpop_v_u16m8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcpopv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vuint16m8_t test_vcpopv_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u16m8_mu(mask, maskedoff, vs2, vl);
+vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u16m8_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32mf2_mu(
+// CHECK-LABEL: @test_vcpop_v_u32mf2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcpopv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vuint32mf2_t test_vcpopv_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32mf2_mu(mask, maskedoff, vs2, vl);
+vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32mf2_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m1_mu(
+// CHECK-LABEL: @test_vcpop_v_u32m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcpopv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vuint32m1_t test_vcpopv_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m1_mu(mask, maskedoff, vs2, vl);
+vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m1_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m2_mu(
+// CHECK-LABEL: @test_vcpop_v_u32m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcpopv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vuint32m2_t test_vcpopv_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m2_mu(mask, maskedoff, vs2, vl);
+vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m2_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m4_mu(
+// CHECK-LABEL: @test_vcpop_v_u32m4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcpopv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vuint32m4_t test_vcpopv_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m4_mu(mask, maskedoff, vs2, vl);
+vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m4_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m8_mu(
+// CHECK-LABEL: @test_vcpop_v_u32m8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcpopv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vuint32m8_t test_vcpopv_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u32m8_mu(mask, maskedoff, vs2, vl);
+vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u32m8_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m1_mu(
+// CHECK-LABEL: @test_vcpop_v_u64m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcpopv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
-vuint64m1_t test_vcpopv_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m1_mu(mask, maskedoff, vs2, vl);
+vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m1_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m2_mu(
+// CHECK-LABEL: @test_vcpop_v_u64m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcpopv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-vuint64m2_t test_vcpopv_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m2_mu(mask, maskedoff, vs2, vl);
+vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m2_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m4_mu(
+// CHECK-LABEL: @test_vcpop_v_u64m4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcpopv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
-vuint64m4_t test_vcpopv_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m4_mu(mask, maskedoff, vs2, vl);
+vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m4_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m8_mu(
+// CHECK-LABEL: @test_vcpop_v_u64m8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcpopv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-vuint64m8_t test_vcpopv_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
- return __riscv_vcpopv_v_u64m8_mu(mask, maskedoff, vs2, vl);
+vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+ return __riscv_vcpop_v_u64m8_mu(mask, maskedoff, vs2, vl);
}
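
For reference, a minimal standalone caller of the renamed masked intrinsic — a sketch only, assuming a toolchain with the Zvbb vector bit-manipulation extension enabled (e.g. -march=rv64gcv_zvbb). The wrapper name byte_popcounts is hypothetical; the intrinsic spelling and argument order match test_vcpop_v_u8m1_mu in the hunks above:

#include <riscv_vector.h>
#include <stddef.h>

/* Counts the set bits in each byte lane of vs2. Under the _mu policy,
   inactive (masked-off) lanes keep the corresponding values from maskedoff
   and tail lanes are agnostic, matching the i64 1 policy operand in the
   CHECK lines above. */
static vuint8m1_t byte_popcounts(vbool8_t mask, vuint8m1_t maskedoff,
                                 vuint8m1_t vs2, size_t vl) {
  return __riscv_vcpop_v_u8m1_mu(mask, maskedoff, vs2, vl);
}
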
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcpopv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcpopv.c
index 02a499d..953ccac 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcpopv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcpopv.c
@@ -16,795 +16,795 @@
#include <riscv_vector.h>
-// CHECK-LABEL: @test_vcpopv_v_u8mf8_tu(
+// CHECK-LABEL: @test_vcpop_v_u8mf8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcpopv.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vuint8mf8_t test_vcpopv_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf4_tu(
+// CHECK-LABEL: @test_vcpop_v_u8mf4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcpopv.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vuint8mf4_t test_vcpopv_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf2_tu(
+// CHECK-LABEL: @test_vcpop_v_u8mf2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcpopv.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vuint8mf2_t test_vcpopv_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m1_tu(
+// CHECK-LABEL: @test_vcpop_v_u8m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcpopv.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vuint8m1_t test_vcpopv_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m2_tu(
+// CHECK-LABEL: @test_vcpop_v_u8m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcpopv.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vuint8m2_t test_vcpopv_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m4_tu(
+// CHECK-LABEL: @test_vcpop_v_u8m4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcpopv.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vuint8m4_t test_vcpopv_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m8_tu(
+// CHECK-LABEL: @test_vcpop_v_u8m8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcpopv.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vuint8m8_t test_vcpopv_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf4_tu(
+// CHECK-LABEL: @test_vcpop_v_u16mf4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcpopv.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vuint16mf4_t test_vcpopv_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf2_tu(
+// CHECK-LABEL: @test_vcpop_v_u16mf2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcpopv.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vuint16mf2_t test_vcpopv_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m1_tu(
+// CHECK-LABEL: @test_vcpop_v_u16m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcpopv.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vuint16m1_t test_vcpopv_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m2_tu(
+// CHECK-LABEL: @test_vcpop_v_u16m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcpopv.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vuint16m2_t test_vcpopv_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m4_tu(
+// CHECK-LABEL: @test_vcpop_v_u16m4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcpopv.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vuint16m4_t test_vcpopv_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m8_tu(
+// CHECK-LABEL: @test_vcpop_v_u16m8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcpopv.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vuint16m8_t test_vcpopv_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32mf2_tu(
+// CHECK-LABEL: @test_vcpop_v_u32mf2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcpopv.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vuint32mf2_t test_vcpopv_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m1_tu(
+// CHECK-LABEL: @test_vcpop_v_u32m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcpopv.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vuint32m1_t test_vcpopv_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m2_tu(
+// CHECK-LABEL: @test_vcpop_v_u32m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcpopv.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vuint32m2_t test_vcpopv_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m4_tu(
+// CHECK-LABEL: @test_vcpop_v_u32m4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcpopv.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vuint32m4_t test_vcpopv_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m8_tu(
+// CHECK-LABEL: @test_vcpop_v_u32m8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcpopv.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vuint32m8_t test_vcpopv_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m1_tu(
+// CHECK-LABEL: @test_vcpop_v_u64m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcpopv.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
-vuint64m1_t test_vcpopv_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m2_tu(
+// CHECK-LABEL: @test_vcpop_v_u64m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcpopv.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-vuint64m2_t test_vcpopv_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m4_tu(
+// CHECK-LABEL: @test_vcpop_v_u64m4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcpopv.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
-vuint64m4_t test_vcpopv_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m8_tu(
+// CHECK-LABEL: @test_vcpop_v_u64m8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcpopv.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-vuint64m8_t test_vcpopv_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
- return __riscv_vcpopv_tu(maskedoff, vs2, vl);
+vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+ return __riscv_vcpop_tu(maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf8_tum(
+// CHECK-LABEL: @test_vcpop_v_u8mf8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcpopv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vuint8mf8_t test_vcpopv_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf4_tum(
+// CHECK-LABEL: @test_vcpop_v_u8mf4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcpopv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vuint8mf4_t test_vcpopv_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf2_tum(
+// CHECK-LABEL: @test_vcpop_v_u8mf2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcpopv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vuint8mf2_t test_vcpopv_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m1_tum(
+// CHECK-LABEL: @test_vcpop_v_u8m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcpopv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vuint8m1_t test_vcpopv_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m2_tum(
+// CHECK-LABEL: @test_vcpop_v_u8m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcpopv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vuint8m2_t test_vcpopv_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m4_tum(
+// CHECK-LABEL: @test_vcpop_v_u8m4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcpopv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vuint8m4_t test_vcpopv_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m8_tum(
+// CHECK-LABEL: @test_vcpop_v_u8m8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcpopv.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vuint8m8_t test_vcpopv_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf4_tum(
+// CHECK-LABEL: @test_vcpop_v_u16mf4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcpopv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vuint16mf4_t test_vcpopv_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf2_tum(
+// CHECK-LABEL: @test_vcpop_v_u16mf2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcpopv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vuint16mf2_t test_vcpopv_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m1_tum(
+// CHECK-LABEL: @test_vcpop_v_u16m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcpopv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vuint16m1_t test_vcpopv_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m2_tum(
+// CHECK-LABEL: @test_vcpop_v_u16m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcpopv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vuint16m2_t test_vcpopv_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m4_tum(
+// CHECK-LABEL: @test_vcpop_v_u16m4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcpopv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vuint16m4_t test_vcpopv_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m8_tum(
+// CHECK-LABEL: @test_vcpop_v_u16m8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcpopv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vuint16m8_t test_vcpopv_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32mf2_tum(
+// CHECK-LABEL: @test_vcpop_v_u32mf2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcpopv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vuint32mf2_t test_vcpopv_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m1_tum(
+// CHECK-LABEL: @test_vcpop_v_u32m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcpopv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vuint32m1_t test_vcpopv_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m2_tum(
+// CHECK-LABEL: @test_vcpop_v_u32m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcpopv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vuint32m2_t test_vcpopv_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m4_tum(
+// CHECK-LABEL: @test_vcpop_v_u32m4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcpopv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vuint32m4_t test_vcpopv_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m8_tum(
+// CHECK-LABEL: @test_vcpop_v_u32m8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcpopv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vuint32m8_t test_vcpopv_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m1_tum(
+// CHECK-LABEL: @test_vcpop_v_u64m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcpopv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
-vuint64m1_t test_vcpopv_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m2_tum(
+// CHECK-LABEL: @test_vcpop_v_u64m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcpopv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-vuint64m2_t test_vcpopv_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m4_tum(
+// CHECK-LABEL: @test_vcpop_v_u64m4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcpopv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
-vuint64m4_t test_vcpopv_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m8_tum(
+// CHECK-LABEL: @test_vcpop_v_u64m8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcpopv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-vuint64m8_t test_vcpopv_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
- return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl);
+vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+ return __riscv_vcpop_tum(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf8_tumu(
+// CHECK-LABEL: @test_vcpop_v_u8mf8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcpopv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vuint8mf8_t test_vcpopv_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf4_tumu(
+// CHECK-LABEL: @test_vcpop_v_u8mf4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcpopv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vuint8mf4_t test_vcpopv_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf2_tumu(
+// CHECK-LABEL: @test_vcpop_v_u8mf2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcpopv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vuint8mf2_t test_vcpopv_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m1_tumu(
+// CHECK-LABEL: @test_vcpop_v_u8m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcpopv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vuint8m1_t test_vcpopv_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m2_tumu(
+// CHECK-LABEL: @test_vcpop_v_u8m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcpopv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vuint8m2_t test_vcpopv_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m4_tumu(
+// CHECK-LABEL: @test_vcpop_v_u8m4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcpopv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vuint8m4_t test_vcpopv_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m8_tumu(
+// CHECK-LABEL: @test_vcpop_v_u8m8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcpopv.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vuint8m8_t test_vcpopv_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf4_tumu(
+// CHECK-LABEL: @test_vcpop_v_u16mf4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcpopv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vuint16mf4_t test_vcpopv_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf2_tumu(
+// CHECK-LABEL: @test_vcpop_v_u16mf2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcpopv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vuint16mf2_t test_vcpopv_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m1_tumu(
+// CHECK-LABEL: @test_vcpop_v_u16m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcpopv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vuint16m1_t test_vcpopv_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m2_tumu(
+// CHECK-LABEL: @test_vcpop_v_u16m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcpopv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vuint16m2_t test_vcpopv_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m4_tumu(
+// CHECK-LABEL: @test_vcpop_v_u16m4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcpopv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vuint16m4_t test_vcpopv_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m8_tumu(
+// CHECK-LABEL: @test_vcpop_v_u16m8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcpopv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vuint16m8_t test_vcpopv_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32mf2_tumu(
+// CHECK-LABEL: @test_vcpop_v_u32mf2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcpopv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vuint32mf2_t test_vcpopv_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m1_tumu(
+// CHECK-LABEL: @test_vcpop_v_u32m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcpopv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vuint32m1_t test_vcpopv_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m2_tumu(
+// CHECK-LABEL: @test_vcpop_v_u32m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcpopv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vuint32m2_t test_vcpopv_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m4_tumu(
+// CHECK-LABEL: @test_vcpop_v_u32m4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcpopv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vuint32m4_t test_vcpopv_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m8_tumu(
+// CHECK-LABEL: @test_vcpop_v_u32m8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcpopv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vuint32m8_t test_vcpopv_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m1_tumu(
+// CHECK-LABEL: @test_vcpop_v_u64m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcpopv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
-vuint64m1_t test_vcpopv_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m2_tumu(
+// CHECK-LABEL: @test_vcpop_v_u64m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcpopv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-vuint64m2_t test_vcpopv_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m4_tumu(
+// CHECK-LABEL: @test_vcpop_v_u64m4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcpopv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
-vuint64m4_t test_vcpopv_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m8_tumu(
+// CHECK-LABEL: @test_vcpop_v_u64m8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcpopv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-vuint64m8_t test_vcpopv_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
- return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl);
+vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+ return __riscv_vcpop_tumu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf8_mu(
+// CHECK-LABEL: @test_vcpop_v_u8mf8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcpopv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vuint8mf8_t test_vcpopv_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint8mf8_t test_vcpop_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf4_mu(
+// CHECK-LABEL: @test_vcpop_v_u8mf4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcpopv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vuint8mf4_t test_vcpopv_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8mf2_mu(
+// CHECK-LABEL: @test_vcpop_v_u8mf2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcpopv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vuint8mf2_t test_vcpopv_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m1_mu(
+// CHECK-LABEL: @test_vcpop_v_u8m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcpopv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vuint8m1_t test_vcpopv_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m2_mu(
+// CHECK-LABEL: @test_vcpop_v_u8m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcpopv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vuint8m2_t test_vcpopv_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m4_mu(
+// CHECK-LABEL: @test_vcpop_v_u8m4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcpopv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vuint8m4_t test_vcpopv_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u8m8_mu(
+// CHECK-LABEL: @test_vcpop_v_u8m8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcpopv.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vuint8m8_t test_vcpopv_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf4_mu(
+// CHECK-LABEL: @test_vcpop_v_u16mf4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcpopv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vuint16mf4_t test_vcpopv_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16mf2_mu(
+// CHECK-LABEL: @test_vcpop_v_u16mf2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcpopv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vuint16mf2_t test_vcpopv_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m1_mu(
+// CHECK-LABEL: @test_vcpop_v_u16m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcpopv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vuint16m1_t test_vcpopv_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m2_mu(
+// CHECK-LABEL: @test_vcpop_v_u16m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcpopv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vuint16m2_t test_vcpopv_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m4_mu(
+// CHECK-LABEL: @test_vcpop_v_u16m4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcpopv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vuint16m4_t test_vcpopv_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u16m8_mu(
+// CHECK-LABEL: @test_vcpop_v_u16m8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcpopv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vuint16m8_t test_vcpopv_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32mf2_mu(
+// CHECK-LABEL: @test_vcpop_v_u32mf2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcpopv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vuint32mf2_t test_vcpopv_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m1_mu(
+// CHECK-LABEL: @test_vcpop_v_u32m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcpopv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vuint32m1_t test_vcpopv_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m2_mu(
+// CHECK-LABEL: @test_vcpop_v_u32m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcpopv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vuint32m2_t test_vcpopv_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m4_mu(
+// CHECK-LABEL: @test_vcpop_v_u32m4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcpopv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vuint32m4_t test_vcpopv_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u32m8_mu(
+// CHECK-LABEL: @test_vcpop_v_u32m8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcpopv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vuint32m8_t test_vcpopv_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m1_mu(
+// CHECK-LABEL: @test_vcpop_v_u64m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcpopv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
-vuint64m1_t test_vcpopv_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m2_mu(
+// CHECK-LABEL: @test_vcpop_v_u64m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcpopv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-vuint64m2_t test_vcpopv_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m4_mu(
+// CHECK-LABEL: @test_vcpop_v_u64m4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcpopv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
-vuint64m4_t test_vcpopv_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
-// CHECK-LABEL: @test_vcpopv_v_u64m8_mu(
+// CHECK-LABEL: @test_vcpop_v_u64m8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcpopv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-vuint64m8_t test_vcpopv_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
- return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl);
+vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+ return __riscv_vcpop_mu(mask, maskedoff, vs2, vl);
}
diff --git a/clang/test/CodeGen/voidptr-vaarg.c b/clang/test/CodeGen/voidptr-vaarg.c
new file mode 100644
index 0000000..d023ddf
--- /dev/null
+++ b/clang/test/CodeGen/voidptr-vaarg.c
@@ -0,0 +1,478 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: webassembly-registered-target
+// RUN: %clang_cc1 -triple wasm32-unknown-unknown -emit-llvm -o - %s | FileCheck %s
+
+// Multiple targets use emitVoidPtrVAArg to lower va_arg instructions in clang.
+// PPC is complicated and is excluded from this case analysis.
+// ForceRightAdjust is false for all non-PPC targets.
+// AllowHigherAlign is only false for two Microsoft targets, both of which
+// pass most things by reference.
+//
+// Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
+// QualType ValueTy, bool IsIndirect,
+// TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign,
+// bool AllowHigherAlign, bool ForceRightAdjust =
+// false);
+//
+// Target       IsIndirect  SlotSize  AllowHigher  ForceRightAdjust
+// ARC          false       4         true         false
+// ARM          varies      4         true         false
+// Mips         false       4 or 8    true         false
+// RISCV        varies      register  true         false
+// PPC          elided
+// LoongArch    varies      register  true         false
+// NVPTX        WIP
+// AMDGPU       WIP
+// X86_32       false       4         true         false
+// X86_64 MS    varies      8         false        false
+// CSKY         false       4         true         false
+// WebAssembly  varies      4         true         false
+// AArch64      false       8         true         false
+// AArch64 MS   false       8         false        false
+//
+// WebAssembly passes an argument indirectly iff it is an aggregate of
+// multiple values. It is chosen here as a representative architecture for
+// checking IR generation, partly because it has a relatively simple variadic
+// calling convention.
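+
+// As a rough sketch (illustrative only; the helper name is invented and this
+// is not the actual CodeGen implementation), the pointer-bump logic that
+// emitVoidPtrVAArg performs for a directly passed argument behaves like:
+//
+//   char *next_slot(char **list, unsigned size, unsigned align) {
+//     char *p = *list;
+//     if (align > 4) // over-aligned arguments round the pointer up first
+//       p = (char *)(((__UINTPTR_TYPE__)p + align - 1) &
+//                    -(__UINTPTR_TYPE__)align);
+//     *list = p + ((size + 3) & ~3u); // advance by size, rounded to the slot
+//     return p; // the value (or, if indirect, a pointer to it) lives here
+//   }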
+
+// Int, by itself and packed in structs
+// CHECK-LABEL: @raw_int(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGP_CUR]], align 4
+// CHECK-NEXT: ret i32 [[TMP0]]
+//
+int raw_int(__builtin_va_list list) { return __builtin_va_arg(list, int); }
+
+typedef struct {
+ int x;
+} one_int_t;
+
+// CHECK-LABEL: @one_int(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_ONE_INT_T:%.*]], align 4
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[RETVAL]], ptr align 4 [[ARGP_CUR]], i32 4, i1 false)
+// CHECK-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_ONE_INT_T]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[COERCE_DIVE]], align 4
+// CHECK-NEXT: ret i32 [[TMP0]]
+//
+one_int_t one_int(__builtin_va_list list) {
+ return __builtin_va_arg(list, one_int_t);
+}
+
+typedef struct {
+ int x;
+ int y;
+} two_int_t;
+
+// CHECK-LABEL: @two_int(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGP_CUR]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[AGG_RESULT:%.*]], ptr align 4 [[TMP0]], i32 8, i1 false)
+// CHECK-NEXT: ret void
+//
+two_int_t two_int(__builtin_va_list list) {
+ return __builtin_va_arg(list, two_int_t);
+}
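+
+// Note: two_int_t is received indirectly. The 4-byte va_list slot holds a
+// pointer to the aggregate, which is then memcpy'd out; this matches the
+// "varies" IsIndirect entry for WebAssembly in the table above.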
+
+// Double, by itself and packed in structs
+// CHECK-LABEL: @raw_double(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 7
+// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[TMP0]], i32 -8)
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i32 8
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load double, ptr [[ARGP_CUR_ALIGNED]], align 8
+// CHECK-NEXT: ret double [[TMP1]]
+//
+double raw_double(__builtin_va_list list) {
+ return __builtin_va_arg(list, double);
+}
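+
+// The getelementptr + ptrmask pair above is the usual align-up idiom; as a C
+// sketch (illustrative only): p_aligned = (char *)(((uintptr_t)p + 7) & ~7),
+// rounding the slot pointer up to the 8-byte alignment of double.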
+
+typedef struct {
+ double x;
+} one_double_t;
+
+// CHECK-LABEL: @one_double(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_ONE_DOUBLE_T:%.*]], align 8
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 7
+// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[TMP0]], i32 -8)
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i32 8
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 8 [[RETVAL]], ptr align 8 [[ARGP_CUR_ALIGNED]], i32 8, i1 false)
+// CHECK-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_ONE_DOUBLE_T]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-NEXT: [[TMP1:%.*]] = load double, ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT: ret double [[TMP1]]
+//
+one_double_t one_double(__builtin_va_list list) {
+ return __builtin_va_arg(list, one_double_t);
+}
+
+typedef struct {
+ double x;
+ double y;
+} two_double_t;
+
+// CHECK-LABEL: @two_double(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGP_CUR]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 8 [[AGG_RESULT:%.*]], ptr align 8 [[TMP0]], i32 16, i1 false)
+// CHECK-NEXT: ret void
+//
+two_double_t two_double(__builtin_va_list list) {
+ return __builtin_va_arg(list, two_double_t);
+}
+
+// Scalars smaller than the slot size, wrapped in structs (C's default
+// argument promotions would otherwise widen a char or short to int; see the
+// note after one_short below)
+typedef struct {
+ char x;
+} one_char_t;
+
+// CHECK-LABEL: @one_char(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_ONE_CHAR_T:%.*]], align 1
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[RETVAL]], ptr align 4 [[ARGP_CUR]], i32 1, i1 false)
+// CHECK-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_ONE_CHAR_T]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[COERCE_DIVE]], align 1
+// CHECK-NEXT: ret i8 [[TMP0]]
+//
+one_char_t one_char(__builtin_va_list list) {
+ return __builtin_va_arg(list, one_char_t);
+}
+
+typedef struct {
+ short x;
+} one_short_t;
+
+// CHECK-LABEL: @one_short(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_ONE_SHORT_T:%.*]], align 2
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 2 [[RETVAL]], ptr align 4 [[ARGP_CUR]], i32 2, i1 false)
+// CHECK-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_ONE_SHORT_T]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[COERCE_DIVE]], align 2
+// CHECK-NEXT: ret i16 [[TMP0]]
+//
+one_short_t one_short(__builtin_va_list list) {
+ return __builtin_va_arg(list, one_short_t);
+}
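+
+// Raw char/short are deliberately not va_arg'd here: C's default argument
+// promotions widen them to int at the call site, so e.g. (illustrative, not
+// part of the checked IR):
+//
+//   void f(int n, ...);
+//   short s = 1;
+//   f(1, s); // s is passed as an int; va_arg(list, short) is undefined
+//
+// Wrapping the scalar in a struct, as above, sidesteps the promotion.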
+
+// Composite smaller than the slot size
+typedef struct {
+ _Alignas(2) char x;
+ char y;
+} char_pair_t;
+
+// CHECK-LABEL: @char_pair(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGP_CUR]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 2 [[AGG_RESULT:%.*]], ptr align 2 [[TMP0]], i32 2, i1 false)
+// CHECK-NEXT: ret void
+//
+char_pair_t char_pair(__builtin_va_list list) {
+ return __builtin_va_arg(list, char_pair_t);
+}
+
+// Empty struct
+typedef struct {
+} empty_t;
+
+// CHECK-LABEL: @empty(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_EMPTY_T:%.*]], align 1
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 0
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[RETVAL]], ptr align 4 [[ARGP_CUR]], i32 0, i1 false)
+// CHECK-NEXT: ret void
+//
+empty_t empty(__builtin_va_list list) {
+ return __builtin_va_arg(list, empty_t);
+}
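+
+// Empty structs are a GNU C extension with sizeof 0, hence the zero-length
+// memcpy and the zero post-increment of the list pointer above.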
+
+typedef struct {
+ empty_t x;
+ int y;
+} empty_int_t;
+
+// CHECK-LABEL: @empty_int(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_EMPTY_INT_T:%.*]], align 4
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[RETVAL]], ptr align 4 [[ARGP_CUR]], i32 4, i1 false)
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[RETVAL]], align 4
+// CHECK-NEXT: ret i32 [[TMP0]]
+//
+empty_int_t empty_int(__builtin_va_list list) {
+ return __builtin_va_arg(list, empty_int_t);
+}
+
+typedef struct {
+ int x;
+ empty_t y;
+} int_empty_t;
+
+// CHECK-LABEL: @int_empty(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_INT_EMPTY_T:%.*]], align 4
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[RETVAL]], ptr align 4 [[ARGP_CUR]], i32 4, i1 false)
+// CHECK-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT_EMPTY_T]], ptr [[RETVAL]], i32 0, i32 0
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[COERCE_DIVE]], align 4
+// CHECK-NEXT: ret i32 [[TMP0]]
+//
+int_empty_t int_empty(__builtin_va_list list) {
+ return __builtin_va_arg(list, int_empty_t);
+}
+
+// Multiple va_arg instructions are needed to check the post-increment of the
+// list pointer. These use types that are passed directly, since the indirect
+// handling is independent of the alignment handling in emitVoidPtrDirectVAArg.
+
+// CHECK-LABEL: @multiple_int(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: [[OUT0_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: [[OUT1_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: [[OUT2_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: store ptr [[OUT0:%.*]], ptr [[OUT0_ADDR]], align 4
+// CHECK-NEXT: store ptr [[OUT1:%.*]], ptr [[OUT1_ADDR]], align 4
+// CHECK-NEXT: store ptr [[OUT2:%.*]], ptr [[OUT2_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGP_CUR]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[OUT0_ADDR]], align 4
+// CHECK-NEXT: store i32 [[TMP0]], ptr [[TMP1]], align 4
+// CHECK-NEXT: [[ARGP_CUR1:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT2:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR1]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT2]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARGP_CUR1]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[OUT1_ADDR]], align 4
+// CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP3]], align 4
+// CHECK-NEXT: [[ARGP_CUR3:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT4:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR3]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT4]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARGP_CUR3]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[OUT2_ADDR]], align 4
+// CHECK-NEXT: store i32 [[TMP4]], ptr [[TMP5]], align 4
+// CHECK-NEXT: ret void
+//
+void multiple_int(__builtin_va_list list, int *out0, int *out1, int *out2) {
+ *out0 = __builtin_va_arg(list, int);
+ *out1 = __builtin_va_arg(list, int);
+ *out2 = __builtin_va_arg(list, int);
+}
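+
+// For reference, a plausible variadic driver for the function above
+// (hypothetical, not part of the checked IR):
+//
+//   void driver(int first, ...) {
+//     __builtin_va_list list;
+//     __builtin_va_start(list, first);
+//     int a, b, c;
+//     multiple_int(list, &a, &b, &c);
+//     __builtin_va_end(list);
+//   }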
+
+// Scalars in structs are an easy way of specifying alignment from C
+// CHECK-LABEL: @increasing_alignment(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: [[OUT0_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: [[OUT1_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: [[OUT2_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: [[OUT3_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: store ptr [[OUT0:%.*]], ptr [[OUT0_ADDR]], align 4
+// CHECK-NEXT: store ptr [[OUT1:%.*]], ptr [[OUT1_ADDR]], align 4
+// CHECK-NEXT: store ptr [[OUT2:%.*]], ptr [[OUT2_ADDR]], align 4
+// CHECK-NEXT: store ptr [[OUT3:%.*]], ptr [[OUT3_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[OUT0_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[TMP0]], ptr align 4 [[ARGP_CUR]], i32 1, i1 false)
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[OUT1_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR1:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT2:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR1]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT2]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 2 [[TMP1]], ptr align 4 [[ARGP_CUR1]], i32 2, i1 false)
+// CHECK-NEXT: [[ARGP_CUR3:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT4:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR3]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT4]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARGP_CUR3]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[OUT2_ADDR]], align 4
+// CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP3]], align 4
+// CHECK-NEXT: [[ARGP_CUR5:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR5]], i32 7
+// CHECK-NEXT: [[ARGP_CUR5_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[TMP4]], i32 -8)
+// CHECK-NEXT: [[ARGP_NEXT6:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR5_ALIGNED]], i32 8
+// CHECK-NEXT: store ptr [[ARGP_NEXT6]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load double, ptr [[ARGP_CUR5_ALIGNED]], align 8
+// CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[OUT3_ADDR]], align 4
+// CHECK-NEXT: store double [[TMP5]], ptr [[TMP6]], align 8
+// CHECK-NEXT: ret void
+//
+void increasing_alignment(__builtin_va_list list, one_char_t *out0,
+ one_short_t *out1, int *out2, double *out3) {
+ *out0 = __builtin_va_arg(list, one_char_t);
+ *out1 = __builtin_va_arg(list, one_short_t);
+ *out2 = __builtin_va_arg(list, int);
+ *out3 = __builtin_va_arg(list, double);
+}
+
+// CHECK-LABEL: @decreasing_alignment(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: [[OUT0_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: [[OUT1_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: [[OUT2_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: [[OUT3_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: store ptr [[OUT0:%.*]], ptr [[OUT0_ADDR]], align 4
+// CHECK-NEXT: store ptr [[OUT1:%.*]], ptr [[OUT1_ADDR]], align 4
+// CHECK-NEXT: store ptr [[OUT2:%.*]], ptr [[OUT2_ADDR]], align 4
+// CHECK-NEXT: store ptr [[OUT3:%.*]], ptr [[OUT3_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 7
+// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[TMP0]], i32 -8)
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i32 8
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load double, ptr [[ARGP_CUR_ALIGNED]], align 8
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[OUT0_ADDR]], align 4
+// CHECK-NEXT: store double [[TMP1]], ptr [[TMP2]], align 8
+// CHECK-NEXT: [[ARGP_CUR1:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT2:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR1]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT2]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARGP_CUR1]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[OUT1_ADDR]], align 4
+// CHECK-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[OUT2_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR3:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT4:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR3]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT4]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 2 [[TMP5]], ptr align 4 [[ARGP_CUR3]], i32 2, i1 false)
+// CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[OUT3_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR5:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT6:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR5]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT6]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[TMP6]], ptr align 4 [[ARGP_CUR5]], i32 1, i1 false)
+// CHECK-NEXT: ret void
+//
+void decreasing_alignment(__builtin_va_list list, double *out0, int *out1,
+ one_short_t *out2, one_char_t *out3) {
+ *out0 = __builtin_va_arg(list, double);
+ *out1 = __builtin_va_arg(list, int);
+ *out2 = __builtin_va_arg(list, one_short_t);
+ *out3 = __builtin_va_arg(list, one_char_t);
+}
+
+// Typical edge cases, none of which hit special handling in VAArg lowering.
+typedef struct {
+ int x[16];
+ double y[8];
+} large_value_t;
+
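+// Note: the va slot holds a pointer to the 128-byte aggregate (indirect
+// passing), hence the 'load ptr' followed by the 128-byte memcpy below.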
+// CHECK-LABEL: @large_value(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: store ptr [[OUT:%.*]], ptr [[OUT_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[OUT_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGP_CUR]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 8 [[TMP0]], ptr align 8 [[TMP1]], i32 128, i1 false)
+// CHECK-NEXT: ret void
+//
+void large_value(__builtin_va_list list, large_value_t *out) {
+ *out = __builtin_va_arg(list, large_value_t);
+}
+
+typedef int v128_t __attribute__((__vector_size__(16), __aligned__(16)));
+// CHECK-LABEL: @vector(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: store ptr [[OUT:%.*]], ptr [[OUT_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 15
+// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[TMP0]], i32 -16)
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i32 16
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[ARGP_CUR_ALIGNED]], align 16
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[OUT_ADDR]], align 4
+// CHECK-NEXT: store <4 x i32> [[TMP1]], ptr [[TMP2]], align 16
+// CHECK-NEXT: ret void
+//
+void vector(__builtin_va_list list, v128_t *out) {
+ *out = __builtin_va_arg(list, v128_t);
+}
+
+typedef struct BF {
+ float not_an_i32[2];
+ int A : 1;
+ char B;
+ int C : 13;
+} BF;
+
+// CHECK-LABEL: @bitfield(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[LIST_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr [[LIST:%.*]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: store ptr [[OUT:%.*]], ptr [[OUT_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[OUT_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
+// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[LIST_ADDR]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGP_CUR]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP0]], ptr align 4 [[TMP1]], i32 12, i1 false)
+// CHECK-NEXT: ret void
+//
+void bitfield(__builtin_va_list list, BF *out) {
+ *out = __builtin_va_arg(list, BF);
+}
diff --git a/clang/test/CodeGenCUDA/cuda-builtin-vars.cu b/clang/test/CodeGenCUDA/cuda-builtin-vars.cu
index ba5e5f1..7880a80 100644
--- a/clang/test/CodeGenCUDA/cuda-builtin-vars.cu
+++ b/clang/test/CodeGenCUDA/cuda-builtin-vars.cu
@@ -6,21 +6,21 @@
__attribute__((global))
void kernel(int *out) {
int i = 0;
- out[i++] = threadIdx.x; // CHECK: call noundef i32 @llvm.nvvm.read.ptx.sreg.tid.x()
- out[i++] = threadIdx.y; // CHECK: call noundef i32 @llvm.nvvm.read.ptx.sreg.tid.y()
- out[i++] = threadIdx.z; // CHECK: call noundef i32 @llvm.nvvm.read.ptx.sreg.tid.z()
+ out[i++] = threadIdx.x; // CHECK: call noundef{{.*}} i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+ out[i++] = threadIdx.y; // CHECK: call noundef{{.*}} i32 @llvm.nvvm.read.ptx.sreg.tid.y()
+ out[i++] = threadIdx.z; // CHECK: call noundef{{.*}} i32 @llvm.nvvm.read.ptx.sreg.tid.z()
- out[i++] = blockIdx.x; // CHECK: call noundef i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
- out[i++] = blockIdx.y; // CHECK: call noundef i32 @llvm.nvvm.read.ptx.sreg.ctaid.y()
- out[i++] = blockIdx.z; // CHECK: call noundef i32 @llvm.nvvm.read.ptx.sreg.ctaid.z()
+ out[i++] = blockIdx.x; // CHECK: call noundef{{.*}} i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
+ out[i++] = blockIdx.y; // CHECK: call noundef{{.*}} i32 @llvm.nvvm.read.ptx.sreg.ctaid.y()
+ out[i++] = blockIdx.z; // CHECK: call noundef{{.*}} i32 @llvm.nvvm.read.ptx.sreg.ctaid.z()
- out[i++] = blockDim.x; // CHECK: call noundef i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
- out[i++] = blockDim.y; // CHECK: call noundef i32 @llvm.nvvm.read.ptx.sreg.ntid.y()
- out[i++] = blockDim.z; // CHECK: call noundef i32 @llvm.nvvm.read.ptx.sreg.ntid.z()
+ out[i++] = blockDim.x; // CHECK: call noundef{{.*}} i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
+ out[i++] = blockDim.y; // CHECK: call noundef{{.*}} i32 @llvm.nvvm.read.ptx.sreg.ntid.y()
+ out[i++] = blockDim.z; // CHECK: call noundef{{.*}} i32 @llvm.nvvm.read.ptx.sreg.ntid.z()
- out[i++] = gridDim.x; // CHECK: call noundef i32 @llvm.nvvm.read.ptx.sreg.nctaid.x()
- out[i++] = gridDim.y; // CHECK: call noundef i32 @llvm.nvvm.read.ptx.sreg.nctaid.y()
- out[i++] = gridDim.z; // CHECK: call noundef i32 @llvm.nvvm.read.ptx.sreg.nctaid.z()
+ out[i++] = gridDim.x; // CHECK: call noundef{{.*}} i32 @llvm.nvvm.read.ptx.sreg.nctaid.x()
+ out[i++] = gridDim.y; // CHECK: call noundef{{.*}} i32 @llvm.nvvm.read.ptx.sreg.nctaid.y()
+ out[i++] = gridDim.z; // CHECK: call noundef{{.*}} i32 @llvm.nvvm.read.ptx.sreg.nctaid.z()
out[i++] = warpSize; // CHECK: store i32 32,
diff --git a/clang/test/CodeGenCXX/inline-then-fold-variadics.cpp b/clang/test/CodeGenCXX/inline-then-fold-variadics.cpp
new file mode 100644
index 0000000..a0673b9
--- /dev/null
+++ b/clang/test/CodeGenCXX/inline-then-fold-variadics.cpp
@@ -0,0 +1,181 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature
+// REQUIRES: webassembly-registered-target
+
+// Simple calls to known variadic functions that are completely elided when
+// optimisations are on. This is a functional check that the expand-variadics
+// pass is consistent with clang's va_arg handling.
+
+// When expand-variadics is added to the default pipeline, clang -O1 will
+// suffice here. -Wno-varargs avoids the warning that the second argument to
+// 'va_start' is not the last named parameter.
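+//
+// Conceptually (a sketch only, not something FileCheck matches below): a call
+//   int r = first<int, int>(x, y);
+// is rewritten by expand-variadics into roughly
+//   struct { int a0, a1; } args = {x, y};   // hypothetical frame layout
+//   int r = first_valist(&args);            // hypothetical va_list clone
+// after which inlining and SROA fold the va_arg reads down to plain x.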
+
+// RUN: %clang_cc1 %s -triple wasm32-unknown-unknown -Wno-varargs -O1 -emit-llvm -o - | opt - -S --passes='module(expand-variadics,default<O1>)' --expand-variadics-override=optimize -o - | FileCheck %s
+
+#include <stdarg.h>
+#include <stdint.h>
+
+template <typename X, typename Y> static X first(...) {
+ va_list va;
+ __builtin_va_start(va, 0);
+ X r = va_arg(va, X);
+ va_end(va);
+ return r;
+}
+
+template <typename X, typename Y> static Y second(...) {
+ va_list va;
+ __builtin_va_start(va, 0);
+ va_arg(va, X);
+ Y r = va_arg(va, Y);
+ va_end(va);
+ return r;
+}
+
+extern "C" {
+
+// CHECK-LABEL: define {{[^@]+}}@first_pair_i32
+// CHECK-SAME: (i32 noundef returned [[X:%.*]], i32 noundef [[Y:%.*]])
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 [[X]]
+//
+int first_pair_i32(int x, int y) { return first<int, int>(x, y); }
+
+// CHECK-LABEL: define {{[^@]+}}@second_pair_i32
+// CHECK-SAME: (i32 noundef [[X:%.*]], i32 noundef returned [[Y:%.*]])
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 [[Y]]
+//
+int second_pair_i32(int x, int y) { return second<int, int>(x, y); }
+
+// CHECK-LABEL: define {{[^@]+}}@first_pair_f64
+// CHECK-SAME: (double noundef returned [[X:%.*]], double noundef [[Y:%.*]])
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret double [[X]]
+//
+double first_pair_f64(double x, double y) {
+ return first<double, double>(x, y);
+}
+
+// CHECK-LABEL: define {{[^@]+}}@second_pair_f64
+// CHECK-SAME: (double noundef [[X:%.*]], double noundef returned [[Y:%.*]])
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret double [[Y]]
+//
+double second_pair_f64(double x, double y) {
+ return second<double, double>(x, y);
+}
+}
+
+extern "C" {
+
+// CHECK-LABEL: define {{[^@]+}}@first_i32_f64
+// CHECK-SAME: (i32 noundef returned [[X:%.*]], double noundef [[Y:%.*]])
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 [[X]]
+//
+int first_i32_f64(int x, double y) { return first<int, double>(x, y); }
+
+// CHECK-LABEL: define {{[^@]+}}@second_i32_f64
+// CHECK-SAME: (i32 noundef [[X:%.*]], double noundef returned [[Y:%.*]])
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret double [[Y]]
+//
+double second_i32_f64(int x, double y) { return second<int, double>(x, y); }
+
+// CHECK-LABEL: define {{[^@]+}}@first_f64_i32
+// CHECK-SAME: (double noundef returned [[X:%.*]], i32 noundef [[Y:%.*]])
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret double [[X]]
+//
+double first_f64_i32(double x, int y) { return first<double, int>(x, y); }
+
+// CHECK-LABEL: define {{[^@]+}}@second_f64_i32
+// CHECK-SAME: (double noundef [[X:%.*]], i32 noundef returned [[Y:%.*]])
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 [[Y]]
+//
+int second_f64_i32(double x, int y) { return second<double, int>(x, y); }
+}
+
+extern "C" {
+typedef uint64_t ulong2 __attribute__((__vector_size__(16), __aligned__(16)));
+
+// CHECK-LABEL: define {{[^@]+}}@first_i32_ulong2
+// CHECK-SAME: (i32 noundef returned [[X:%.*]], ptr nocapture noundef readonly [[Y:%.*]])
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 [[X]]
+//
+int first_i32_ulong2(int x, ulong2 *y) { return first<int, ulong2>(x, *y); }
+
+// CHECK-LABEL: define {{[^@]+}}@second_i32_ulong2
+// CHECK-SAME: (i32 noundef [[X:%.*]], ptr nocapture noundef readonly [[Y:%.*]], ptr nocapture noundef writeonly [[R:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr [[Y]], align 16, !tbaa [[TBAA2:![0-9]+]]
+// CHECK-NEXT: store <2 x i64> [[TMP0]], ptr [[R]], align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+void second_i32_ulong2(int x, ulong2 *y, ulong2 *r) {
+ *r = second<int, ulong2>(x, *y);
+}
+
+// CHECK-LABEL: define {{[^@]+}}@first_ulong2_i32
+// CHECK-SAME: (ptr nocapture noundef readonly [[X:%.*]], i32 noundef [[Y:%.*]], ptr nocapture noundef writeonly [[R:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr [[X]], align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: store <2 x i64> [[TMP0]], ptr [[R]], align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+void first_ulong2_i32(ulong2 *x, int y, ulong2 *r) {
+ *r = first<ulong2, int>(*x, y);
+}
+
+// CHECK-LABEL: define {{[^@]+}}@second_ulong2_i32
+// CHECK-SAME: (ptr nocapture noundef readonly [[X:%.*]], i32 noundef returned [[Y:%.*]])
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 [[Y]]
+//
+int second_ulong2_i32(ulong2 *x, int y) { return second<ulong2, int>(*x, y); }
+}
+
+// ascending alignment
+typedef struct {
+ char c;
+ short s;
+ int i;
+ long l;
+ float f;
+ double d;
+} asc;
+
+extern "C" {
+
+// CHECK-LABEL: define {{[^@]+}}@first_i32_asc
+// CHECK-SAME: (i32 noundef returned [[X:%.*]], ptr nocapture noundef readonly [[Y:%.*]])
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 [[X]]
+//
+int first_i32_asc(int x, asc *y) { return first<int, asc>(x, *y); }
+
+// CHECK-LABEL: define {{[^@]+}}@second_i32_asc
+// CHECK-SAME: (i32 noundef [[X:%.*]], ptr nocapture noundef readonly [[Y:%.*]], ptr nocapture noundef writeonly [[R:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void @llvm.memmove.p0.p0.i32(ptr noundef nonnull align 8 dereferenceable(24) [[R]], ptr noundef nonnull align 1 dereferenceable(24) [[Y]], i32 24, i1 false)
+// CHECK-NEXT: ret void
+//
+void second_i32_asc(int x, asc *y, asc *r) { *r = second<int, asc>(x, *y); }
+
+// CHECK-LABEL: define {{[^@]+}}@first_asc_i32
+// CHECK-SAME: (ptr nocapture noundef readonly [[X:%.*]], i32 noundef [[Y:%.*]], ptr nocapture noundef writeonly [[R:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void @llvm.memmove.p0.p0.i32(ptr noundef nonnull align 8 dereferenceable(24) [[R]], ptr noundef nonnull align 1 dereferenceable(24) [[X]], i32 24, i1 false)
+// CHECK-NEXT: ret void
+//
+void first_asc_i32(asc *x, int y, asc *r) { *r = first<asc, int>(*x, y); }
+
+// CHECK-LABEL: define {{[^@]+}}@second_asc_i32
+// CHECK-SAME: (ptr nocapture noundef readonly [[X:%.*]], i32 noundef returned [[Y:%.*]])
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 [[Y]]
+//
+int second_asc_i32(asc *x, int y) { return second<asc, int>(*x, y); }
+}
diff --git a/clang/test/CodeGenCXX/pointers-to-data-members.cpp b/clang/test/CodeGenCXX/pointers-to-data-members.cpp
index 29f1c3f..cf1d6c0 100644
--- a/clang/test/CodeGenCXX/pointers-to-data-members.cpp
+++ b/clang/test/CodeGenCXX/pointers-to-data-members.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 %s -emit-llvm -o %t.ll -triple=x86_64-apple-darwin10
+// RUN: %clang_cc1 %s -emit-llvm -o %t.ll -triple=x86_64-apple-darwin10 -fexperimental-new-constant-interpreter
// RUN: FileCheck %s < %t.ll
// RUN: FileCheck -check-prefix=CHECK-GLOBAL %s < %t.ll
diff --git a/clang/test/CodeGenCXX/template-param-objects-linkage.cpp b/clang/test/CodeGenCXX/template-param-objects-linkage.cpp
index 63e7d8c..9c148ed 100644
--- a/clang/test/CodeGenCXX/template-param-objects-linkage.cpp
+++ b/clang/test/CodeGenCXX/template-param-objects-linkage.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -triple x86_64-linux-gnu -std=c++20 %s -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -std=c++20 %s -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s
struct S { char buf[32]; };
template<S s> constexpr const char* f() { return s.buf; }
diff --git a/clang/test/CodeGenOpenCL/amdgpu-features.cl b/clang/test/CodeGenOpenCL/amdgpu-features.cl
index 2fda52d..854ab39 100644
--- a/clang/test/CodeGenOpenCL/amdgpu-features.cl
+++ b/clang/test/CodeGenOpenCL/amdgpu-features.cl
@@ -49,6 +49,7 @@
// RUN: %clang_cc1 -triple amdgcn -target-cpu gfx1103 -emit-llvm -o - %s | FileCheck --check-prefix=GFX1103 %s
// RUN: %clang_cc1 -triple amdgcn -target-cpu gfx1150 -emit-llvm -o - %s | FileCheck --check-prefix=GFX1150 %s
// RUN: %clang_cc1 -triple amdgcn -target-cpu gfx1151 -emit-llvm -o - %s | FileCheck --check-prefix=GFX1151 %s
+// RUN: %clang_cc1 -triple amdgcn -target-cpu gfx1152 -emit-llvm -o - %s | FileCheck --check-prefix=GFX1152 %s
// RUN: %clang_cc1 -triple amdgcn -target-cpu gfx1200 -emit-llvm -o - %s | FileCheck --check-prefix=GFX1200 %s
// RUN: %clang_cc1 -triple amdgcn -target-cpu gfx1201 -emit-llvm -o - %s | FileCheck --check-prefix=GFX1201 %s
@@ -100,6 +101,7 @@
// GFX1103: "target-features"="+16-bit-insts,+atomic-fadd-rtn-insts,+ci-insts,+dl-insts,+dot10-insts,+dot5-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
// GFX1150: "target-features"="+16-bit-insts,+atomic-fadd-rtn-insts,+ci-insts,+dl-insts,+dot10-insts,+dot5-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
// GFX1151: "target-features"="+16-bit-insts,+atomic-fadd-rtn-insts,+ci-insts,+dl-insts,+dot10-insts,+dot5-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
+// GFX1152: "target-features"="+16-bit-insts,+atomic-fadd-rtn-insts,+ci-insts,+dl-insts,+dot10-insts,+dot5-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
// GFX1200: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot10-insts,+dot11-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+fp8-conversion-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
// GFX1201: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot10-insts,+dot11-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+fp8-conversion-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx11.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx11.cl
index d17ff81..6606178 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx11.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx11.cl
@@ -5,6 +5,7 @@
// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1103 -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1150 -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1151 -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1152 -emit-llvm -o - %s | FileCheck %s
typedef unsigned int uint;
typedef unsigned long ulong;
diff --git a/clang/test/Driver/aarch64-oryon-1.c b/clang/test/Driver/aarch64-oryon-1.c
new file mode 100644
index 0000000..952ba5d
--- /dev/null
+++ b/clang/test/Driver/aarch64-oryon-1.c
@@ -0,0 +1,19 @@
+// RUN: %clang -target aarch64 -mcpu=oryon-1 -### -c %s 2>&1 | FileCheck -check-prefix=Phoenix %s
+// RUN: %clang -target aarch64 -mlittle-endian -mcpu=oryon-1 -### -c %s 2>&1 | FileCheck -check-prefix=Phoenix %s
+// RUN: %clang -target aarch64_be -mlittle-endian -mcpu=oryon-1 -### -c %s 2>&1 | FileCheck -check-prefix=Phoenix %s
+// RUN: %clang -target aarch64 -mtune=oryon-1 -### -c %s 2>&1 | FileCheck -check-prefix=Phoenix-TUNE %s
+// RUN: %clang -target aarch64 -mlittle-endian -mtune=oryon-1 -### -c %s 2>&1 | FileCheck -check-prefix=Phoenix-TUNE %s
+// RUN: %clang -target aarch64_be -mlittle-endian -mtune=oryon-1 -### -c %s 2>&1 | FileCheck -check-prefix=Phoenix-TUNE %s
+// Phoenix: "-cc1"{{.*}} "-triple" "aarch64{{(--)?}}"{{.*}} "-target-cpu" "oryon-1" "-target-feature" "+v8.6a"
+// Phoenix-TUNE: "-cc1"{{.*}} "-triple" "aarch64{{(--)?}}"{{.*}} "-target-cpu" "generic"
+
+// RUN: %clang -target arm64 -mcpu=oryon-1 -### -c %s 2>&1 | FileCheck -check-prefix=ARM64-Phoenix %s
+// RUN: %clang -target arm64 -mlittle-endian -mcpu=oryon-1 -### -c %s 2>&1 | FileCheck -check-prefix=ARM64-Phoenix %s
+// RUN: %clang -target arm64 -mtune=oryon-1 -### -c %s 2>&1 | FileCheck -check-prefix=ARM64-Phoenix-TUNE %s
+// RUN: %clang -target arm64 -mlittle-endian -mtune=oryon-1 -### -c %s 2>&1 | FileCheck -check-prefix=ARM64-Phoenix-TUNE %s
+// ARM64-Phoenix: "-cc1"{{.*}} "-triple" "arm64{{.*}}" "-target-cpu" "oryon-1" "-target-feature" "+v8.6a"
+// ARM64-Phoenix-TUNE: "-cc1"{{.*}} "-triple" "arm64{{.*}}" "-target-cpu" "generic"
+
+// RUN: %clang -target aarch64 -mcpu=oryon-1 -mtune=cortex-a53 -### -c %s 2>&1 | FileCheck -check-prefix=MCPU-MTUNE-Phoenix %s
+// RUN: %clang -target aarch64 -mtune=cortex-a53 -mcpu=oryon-1 -### -c %s 2>&1 | FileCheck -check-prefix=MCPU-MTUNE-Phoenix %s
+// MCPU-MTUNE-Phoenix: "-cc1"{{.*}} "-triple" "aarch64{{.*}}" "-target-cpu" "oryon-1"
diff --git a/clang/test/Driver/amdgpu-macros.cl b/clang/test/Driver/amdgpu-macros.cl
index a878a7d..3e4a570 100644
--- a/clang/test/Driver/amdgpu-macros.cl
+++ b/clang/test/Driver/amdgpu-macros.cl
@@ -127,6 +127,7 @@
// RUN: %clang -E -dM -target amdgcn -mcpu=gfx1103 %s 2>&1 | FileCheck --check-prefixes=ARCH-GCN,FAST_FMAF %s -DWAVEFRONT_SIZE=32 -DCPU=gfx1103 -DFAMILY=GFX11
// RUN: %clang -E -dM -target amdgcn -mcpu=gfx1150 %s 2>&1 | FileCheck --check-prefixes=ARCH-GCN,FAST_FMAF %s -DWAVEFRONT_SIZE=32 -DCPU=gfx1150 -DFAMILY=GFX11
// RUN: %clang -E -dM -target amdgcn -mcpu=gfx1151 %s 2>&1 | FileCheck --check-prefixes=ARCH-GCN,FAST_FMAF %s -DWAVEFRONT_SIZE=32 -DCPU=gfx1151 -DFAMILY=GFX11
+// RUN: %clang -E -dM -target amdgcn -mcpu=gfx1152 %s 2>&1 | FileCheck --check-prefixes=ARCH-GCN,FAST_FMAF %s -DWAVEFRONT_SIZE=32 -DCPU=gfx1152 -DFAMILY=GFX11
// RUN: %clang -E -dM -target amdgcn -mcpu=gfx1200 %s 2>&1 | FileCheck --check-prefixes=ARCH-GCN,FAST_FMAF %s -DWAVEFRONT_SIZE=32 -DCPU=gfx1200 -DFAMILY=GFX12
// RUN: %clang -E -dM -target amdgcn -mcpu=gfx1201 %s 2>&1 | FileCheck --check-prefixes=ARCH-GCN,FAST_FMAF %s -DWAVEFRONT_SIZE=32 -DCPU=gfx1201 -DFAMILY=GFX12
diff --git a/clang/test/Driver/amdgpu-mcpu.cl b/clang/test/Driver/amdgpu-mcpu.cl
index 5b6a220..4b0ef92 100644
--- a/clang/test/Driver/amdgpu-mcpu.cl
+++ b/clang/test/Driver/amdgpu-mcpu.cl
@@ -112,6 +112,7 @@
// RUN: %clang -### -target amdgcn -mcpu=gfx1103 %s 2>&1 | FileCheck --check-prefix=GFX1103 %s
// RUN: %clang -### -target amdgcn -mcpu=gfx1150 %s 2>&1 | FileCheck --check-prefix=GFX1150 %s
// RUN: %clang -### -target amdgcn -mcpu=gfx1151 %s 2>&1 | FileCheck --check-prefix=GFX1151 %s
+// RUN: %clang -### -target amdgcn -mcpu=gfx1152 %s 2>&1 | FileCheck --check-prefix=GFX1152 %s
// RUN: %clang -### -target amdgcn -mcpu=gfx1200 %s 2>&1 | FileCheck --check-prefix=GFX1200 %s
// RUN: %clang -### -target amdgcn -mcpu=gfx1201 %s 2>&1 | FileCheck --check-prefix=GFX1201 %s
@@ -164,6 +165,7 @@
// GFX1103: "-target-cpu" "gfx1103"
// GFX1150: "-target-cpu" "gfx1150"
// GFX1151: "-target-cpu" "gfx1151"
+// GFX1152: "-target-cpu" "gfx1152"
// GFX1200: "-target-cpu" "gfx1200"
// GFX1201: "-target-cpu" "gfx1201"
diff --git a/clang/test/Interpreter/pretty-print.c b/clang/test/Interpreter/pretty-print.c
new file mode 100644
index 0000000..f6158ad
--- /dev/null
+++ b/clang/test/Interpreter/pretty-print.c
@@ -0,0 +1,8 @@
+// REQUIRES: host-supports-jit
+// UNSUPPORTED: system-aix
+// RUN: cat %s | clang-repl -Xcc -xc | FileCheck %s
+// RUN: cat %s | clang-repl -Xcc -std=c++11 | FileCheck %s
+
+const char* c_str = "Hello, world!"; c_str
+
+// CHECK: Not implement yet.
diff --git a/clang/test/Misc/target-invalid-cpu-note.c b/clang/test/Misc/target-invalid-cpu-note.c
index bad1374..cb5b675 100644
--- a/clang/test/Misc/target-invalid-cpu-note.c
+++ b/clang/test/Misc/target-invalid-cpu-note.c
@@ -5,11 +5,11 @@
// RUN: not %clang_cc1 -triple arm64--- -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix AARCH64
// AARCH64: error: unknown target CPU 'not-a-cpu'
-// AARCH64-NEXT: note: valid target CPU values are: cortex-a34, cortex-a35, cortex-a53, cortex-a55, cortex-a510, cortex-a520, cortex-a520ae, cortex-a57, cortex-a65, cortex-a65ae, cortex-a72, cortex-a73, cortex-a75, cortex-a76, cortex-a76ae, cortex-a77, cortex-a78, cortex-a78ae, cortex-a78c, cortex-a710, cortex-a715, cortex-a720, cortex-a720ae, cortex-r82, cortex-r82ae, cortex-x1, cortex-x1c, cortex-x2, cortex-x3, cortex-x4, neoverse-e1, neoverse-n1, neoverse-n2, neoverse-n3, neoverse-512tvb, neoverse-v1, neoverse-v2, neoverse-v3, neoverse-v3ae, cyclone, apple-a7, apple-a8, apple-a9, apple-a10, apple-a11, apple-a12, apple-a13, apple-a14, apple-a15, apple-a16, apple-a17, apple-m1, apple-m2, apple-m3, apple-s4, apple-s5, exynos-m3, exynos-m4, exynos-m5, falkor, saphira, kryo, thunderx2t99, thunderx3t110, thunderx, thunderxt88, thunderxt81, thunderxt83, tsv110, a64fx, carmel, ampere1, ampere1a, ampere1b, cobalt-100, grace{{$}}
+// AARCH64-NEXT: note: valid target CPU values are: cortex-a34, cortex-a35, cortex-a53, cortex-a55, cortex-a510, cortex-a520, cortex-a520ae, cortex-a57, cortex-a65, cortex-a65ae, cortex-a72, cortex-a73, cortex-a75, cortex-a76, cortex-a76ae, cortex-a77, cortex-a78, cortex-a78ae, cortex-a78c, cortex-a710, cortex-a715, cortex-a720, cortex-a720ae, cortex-r82, cortex-r82ae, cortex-x1, cortex-x1c, cortex-x2, cortex-x3, cortex-x4, neoverse-e1, neoverse-n1, neoverse-n2, neoverse-n3, neoverse-512tvb, neoverse-v1, neoverse-v2, neoverse-v3, neoverse-v3ae, cyclone, apple-a7, apple-a8, apple-a9, apple-a10, apple-a11, apple-a12, apple-a13, apple-a14, apple-a15, apple-a16, apple-a17, apple-m1, apple-m2, apple-m3, apple-s4, apple-s5, exynos-m3, exynos-m4, exynos-m5, falkor, saphira, kryo, thunderx2t99, thunderx3t110, thunderx, thunderxt88, thunderxt81, thunderxt83, tsv110, a64fx, carmel, ampere1, ampere1a, ampere1b, oryon-1, cobalt-100, grace{{$}}
// RUN: not %clang_cc1 -triple arm64--- -tune-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix TUNE_AARCH64
// TUNE_AARCH64: error: unknown target CPU 'not-a-cpu'
-// TUNE_AARCH64-NEXT: note: valid target CPU values are: cortex-a34, cortex-a35, cortex-a53, cortex-a55, cortex-a510, cortex-a520, cortex-a520ae, cortex-a57, cortex-a65, cortex-a65ae, cortex-a72, cortex-a73, cortex-a75, cortex-a76, cortex-a76ae, cortex-a77, cortex-a78, cortex-a78ae, cortex-a78c, cortex-a710, cortex-a715, cortex-a720, cortex-a720ae, cortex-r82, cortex-r82ae, cortex-x1, cortex-x1c, cortex-x2, cortex-x3, cortex-x4, neoverse-e1, neoverse-n1, neoverse-n2, neoverse-n3, neoverse-512tvb, neoverse-v1, neoverse-v2, neoverse-v3, neoverse-v3ae, cyclone, apple-a7, apple-a8, apple-a9, apple-a10, apple-a11, apple-a12, apple-a13, apple-a14, apple-a15, apple-a16, apple-a17, apple-m1, apple-m2, apple-m3, apple-s4, apple-s5, exynos-m3, exynos-m4, exynos-m5, falkor, saphira, kryo, thunderx2t99, thunderx3t110, thunderx, thunderxt88, thunderxt81, thunderxt83, tsv110, a64fx, carmel, ampere1, ampere1a, ampere1b, cobalt-100, grace{{$}}
+// TUNE_AARCH64-NEXT: note: valid target CPU values are: cortex-a34, cortex-a35, cortex-a53, cortex-a55, cortex-a510, cortex-a520, cortex-a520ae, cortex-a57, cortex-a65, cortex-a65ae, cortex-a72, cortex-a73, cortex-a75, cortex-a76, cortex-a76ae, cortex-a77, cortex-a78, cortex-a78ae, cortex-a78c, cortex-a710, cortex-a715, cortex-a720, cortex-a720ae, cortex-r82, cortex-r82ae, cortex-x1, cortex-x1c, cortex-x2, cortex-x3, cortex-x4, neoverse-e1, neoverse-n1, neoverse-n2, neoverse-n3, neoverse-512tvb, neoverse-v1, neoverse-v2, neoverse-v3, neoverse-v3ae, cyclone, apple-a7, apple-a8, apple-a9, apple-a10, apple-a11, apple-a12, apple-a13, apple-a14, apple-a15, apple-a16, apple-a17, apple-m1, apple-m2, apple-m3, apple-s4, apple-s5, exynos-m3, exynos-m4, exynos-m5, falkor, saphira, kryo, thunderx2t99, thunderx3t110, thunderx, thunderxt88, thunderxt81, thunderxt83, tsv110, a64fx, carmel, ampere1, ampere1a, ampere1b, oryon-1, cobalt-100, grace{{$}}
// RUN: not %clang_cc1 -triple i386--- -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix X86
// X86: error: unknown target CPU 'not-a-cpu'
@@ -29,7 +29,7 @@
// RUN: not %clang_cc1 -triple nvptx--- -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix NVPTX
// NVPTX: error: unknown target CPU 'not-a-cpu'
-// NVPTX-NEXT: note: valid target CPU values are: sm_20, sm_21, sm_30, sm_32, sm_35, sm_37, sm_50, sm_52, sm_53, sm_60, sm_61, sm_62, sm_70, sm_72, sm_75, sm_80, sm_86, sm_87, sm_89, sm_90, sm_90a, gfx600, gfx601, gfx602, gfx700, gfx701, gfx702, gfx703, gfx704, gfx705, gfx801, gfx802, gfx803, gfx805, gfx810, gfx9-generic, gfx900, gfx902, gfx904, gfx906, gfx908, gfx909, gfx90a, gfx90c, gfx940, gfx941, gfx942, gfx10-1-generic, gfx1010, gfx1011, gfx1012, gfx1013, gfx10-3-generic, gfx1030, gfx1031, gfx1032, gfx1033, gfx1034, gfx1035, gfx1036, gfx11-generic, gfx1100, gfx1101, gfx1102, gfx1103, gfx1150, gfx1151, gfx12-generic, gfx1200, gfx1201{{$}}
+// NVPTX-NEXT: note: valid target CPU values are: sm_20, sm_21, sm_30, sm_32, sm_35, sm_37, sm_50, sm_52, sm_53, sm_60, sm_61, sm_62, sm_70, sm_72, sm_75, sm_80, sm_86, sm_87, sm_89, sm_90, sm_90a, gfx600, gfx601, gfx602, gfx700, gfx701, gfx702, gfx703, gfx704, gfx705, gfx801, gfx802, gfx803, gfx805, gfx810, gfx9-generic, gfx900, gfx902, gfx904, gfx906, gfx908, gfx909, gfx90a, gfx90c, gfx940, gfx941, gfx942, gfx10-1-generic, gfx1010, gfx1011, gfx1012, gfx1013, gfx10-3-generic, gfx1030, gfx1031, gfx1032, gfx1033, gfx1034, gfx1035, gfx1036, gfx11-generic, gfx1100, gfx1101, gfx1102, gfx1103, gfx1150, gfx1151, gfx1152, gfx12-generic, gfx1200, gfx1201{{$}}
// RUN: not %clang_cc1 -triple r600--- -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix R600
// R600: error: unknown target CPU 'not-a-cpu'
@@ -37,7 +37,7 @@
// RUN: not %clang_cc1 -triple amdgcn--- -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix AMDGCN
// AMDGCN: error: unknown target CPU 'not-a-cpu'
-// AMDGCN-NEXT: note: valid target CPU values are: gfx600, tahiti, gfx601, pitcairn, verde, gfx602, hainan, oland, gfx700, kaveri, gfx701, hawaii, gfx702, gfx703, kabini, mullins, gfx704, bonaire, gfx705, gfx801, carrizo, gfx802, iceland, tonga, gfx803, fiji, polaris10, polaris11, gfx805, tongapro, gfx810, stoney, gfx900, gfx902, gfx904, gfx906, gfx908, gfx909, gfx90a, gfx90c, gfx940, gfx941, gfx942, gfx1010, gfx1011, gfx1012, gfx1013, gfx1030, gfx1031, gfx1032, gfx1033, gfx1034, gfx1035, gfx1036, gfx1100, gfx1101, gfx1102, gfx1103, gfx1150, gfx1151, gfx1200, gfx1201, gfx9-generic, gfx10-1-generic, gfx10-3-generic, gfx11-generic, gfx12-generic{{$}}
+// AMDGCN-NEXT: note: valid target CPU values are: gfx600, tahiti, gfx601, pitcairn, verde, gfx602, hainan, oland, gfx700, kaveri, gfx701, hawaii, gfx702, gfx703, kabini, mullins, gfx704, bonaire, gfx705, gfx801, carrizo, gfx802, iceland, tonga, gfx803, fiji, polaris10, polaris11, gfx805, tongapro, gfx810, stoney, gfx900, gfx902, gfx904, gfx906, gfx908, gfx909, gfx90a, gfx90c, gfx940, gfx941, gfx942, gfx1010, gfx1011, gfx1012, gfx1013, gfx1030, gfx1031, gfx1032, gfx1033, gfx1034, gfx1035, gfx1036, gfx1100, gfx1101, gfx1102, gfx1103, gfx1150, gfx1151, gfx1152, gfx1200, gfx1201, gfx9-generic, gfx10-1-generic, gfx10-3-generic, gfx11-generic, gfx12-generic{{$}}
// RUN: not %clang_cc1 -triple wasm64--- -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix WEBASM
// WEBASM: error: unknown target CPU 'not-a-cpu'
diff --git a/clang/test/SemaCXX/attr-weak.cpp b/clang/test/SemaCXX/attr-weak.cpp
index f065bfd..0f9a297 100644
--- a/clang/test/SemaCXX/attr-weak.cpp
+++ b/clang/test/SemaCXX/attr-weak.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fsyntax-only -verify -std=c++11 %s
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fsyntax-only -verify -std=c++11 %s -fexperimental-new-constant-interpreter
static int test0 __attribute__((weak)); // expected-error {{weak declaration cannot have internal linkage}}
static void test1() __attribute__((weak)); // expected-error {{weak declaration cannot have internal linkage}}
diff --git a/clang/test/SemaCXX/builtin-is-bitwise-cloneable-fsanitize.cpp b/clang/test/SemaCXX/builtin-is-bitwise-cloneable-fsanitize.cpp
new file mode 100644
index 0000000..d47a39a
--- /dev/null
+++ b/clang/test/SemaCXX/builtin-is-bitwise-cloneable-fsanitize.cpp
@@ -0,0 +1,34 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux -DSANITIZER_ENABLED -fsanitize=address -fsanitize-address-field-padding=1 %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux %s
+
+struct S {
+ ~S() {}
+ virtual void foo() {}
+
+ int buffer[1];
+ int other_field = 0;
+};
+
+union U {
+ S s;
+};
+
+struct Derived : S {};
+
+static_assert(!__is_trivially_copyable(S));
+#ifdef SANITIZER_ENABLED
+// Don't allow memcpy when the struct has poisoned padding bits.
+// The sanitizer adds poisoned padding bits to struct S.
+static_assert(sizeof(S) > 16);
+static_assert(!__is_bitwise_cloneable(S));
+static_assert(sizeof(U) == sizeof(S)); // no padding bits for U.
+static_assert(!__is_bitwise_cloneable(U));
+static_assert(!__is_bitwise_cloneable(S[2]));
+static_assert(!__is_bitwise_cloneable(Derived));
+#else
+static_assert(sizeof(S) == 16);
+static_assert(__is_bitwise_cloneable(S));
+static_assert(__is_bitwise_cloneable(U));
+static_assert(__is_bitwise_cloneable(S[2]));
+static_assert(__is_bitwise_cloneable(Derived));
+#endif
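+
+// Illustrative sketch only (hypothetical helper, not part of this test):
+// __is_bitwise_cloneable is meant to gate raw byte copies, e.g.
+//   template <class T> void clone_bytes(T &dst, const T &src) {
+//     static_assert(__is_bitwise_cloneable(T), "padding may be poisoned");
+//     __builtin_memcpy(&dst, &src, sizeof(T));
+//   }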
diff --git a/clang/test/SemaCXX/builtin-is-bitwise-cloneable.cpp b/clang/test/SemaCXX/builtin-is-bitwise-cloneable.cpp
new file mode 100644
index 0000000..1781cf4
--- /dev/null
+++ b/clang/test/SemaCXX/builtin-is-bitwise-cloneable.cpp
@@ -0,0 +1,8 @@
+// RUN: %clang_cc1 -fsyntax-only -verify %s
+//
+struct DynamicClass { virtual int Foo(); };
+static_assert(!__is_trivially_copyable(DynamicClass));
+static_assert(__is_bitwise_cloneable(DynamicClass));
+
+struct InComplete; // expected-note{{forward declaration}}
+static_assert(!__is_bitwise_cloneable(InComplete)); // expected-error{{incomplete type 'InComplete' used in type trait expression}}
diff --git a/clang/test/SemaCXX/constexpr-default-arg.cpp b/clang/test/SemaCXX/constexpr-default-arg.cpp
index 901123b..ec9b292 100644
--- a/clang/test/SemaCXX/constexpr-default-arg.cpp
+++ b/clang/test/SemaCXX/constexpr-default-arg.cpp
@@ -32,8 +32,8 @@ void test_default_arg2() {
}
// Check that multiple CXXDefaultInitExprs don't cause an assertion failure.
-struct A { int &&r = 0; };
+struct A { int &&r = 0; }; // expected-note 2{{default member initializer}}
struct B { A x, y; };
-B b = {}; // expected-no-diagnostics
+B b = {}; // expected-warning 2{{lifetime extension of temporary created by aggregate initialization using a default member initializer is not yet supported}}
}
diff --git a/clang/test/SemaCXX/cxx11-default-member-initializers.cpp b/clang/test/SemaCXX/cxx11-default-member-initializers.cpp
index 1ea8b98..dd8e9c6 100644
--- a/clang/test/SemaCXX/cxx11-default-member-initializers.cpp
+++ b/clang/test/SemaCXX/cxx11-default-member-initializers.cpp
@@ -27,80 +27,6 @@ class MemInit {
C m = s;
};
-namespace std {
-typedef decltype(sizeof(int)) size_t;
-
-// libc++'s implementation
-template <class _E> class initializer_list {
- const _E *__begin_;
- size_t __size_;
-
- initializer_list(const _E *__b, size_t __s) : __begin_(__b), __size_(__s) {}
-
-public:
- typedef _E value_type;
- typedef const _E &reference;
- typedef const _E &const_reference;
- typedef size_t size_type;
-
- typedef const _E *iterator;
- typedef const _E *const_iterator;
-
- initializer_list() : __begin_(nullptr), __size_(0) {}
-
- size_t size() const { return __size_; }
- const _E *begin() const { return __begin_; }
- const _E *end() const { return __begin_ + __size_; }
-};
-} // namespace std
-
-#if __cplusplus >= 201703L
-namespace test_rebuild {
-template <typename T, int> class C {
-public:
- C(std::initializer_list<T>);
-};
-
-template <typename T> using Ptr = __remove_pointer(T) *;
-template <typename T> C(T) -> C<Ptr<T>, sizeof(T)>;
-
-class A {
-public:
- template <typename T1, typename T2> T1 *some_func(T2 &&);
-};
-
-struct B : A {
- // Test CXXDefaultInitExpr rebuild issue in
- // https://github.com/llvm/llvm-project/pull/87933
- int *ar = some_func<int>(C{some_func<int>(0)});
- B() {}
-};
-
-int TestBody_got;
-template <int> class Vector {
-public:
- Vector(std::initializer_list<int>);
-};
-template <typename... Ts> Vector(Ts...) -> Vector<sizeof...(Ts)>;
-class ProgramBuilder {
-public:
- template <typename T, typename ARGS> int *create(ARGS);
-};
-
-struct TypeTest : ProgramBuilder {
- int *str_f16 = create<int>(Vector{0});
- TypeTest() {}
-};
-class TypeTest_Element_Test : TypeTest {
- void TestBody();
-};
-void TypeTest_Element_Test::TestBody() {
- int *expect = str_f16;
- &TestBody_got != expect; // expected-warning {{inequality comparison result unused}}
-}
-} // namespace test_rebuild
-#endif // __cplusplus >= 201703L
-
#if __cplusplus >= 202002L
// This test ensures cleanup expressions are correctly produced
// in the presence of default member initializers.
diff --git a/clang/test/SemaCXX/eval-crashes.cpp b/clang/test/SemaCXX/eval-crashes.cpp
index a06f60f..017df97 100644
--- a/clang/test/SemaCXX/eval-crashes.cpp
+++ b/clang/test/SemaCXX/eval-crashes.cpp
@@ -25,9 +25,11 @@ namespace pr33140_0b {
}
namespace pr33140_2 {
- struct A { int &&r = 0; };
+ // FIXME: The declaration of 'b' below should lifetime-extend two int
+ // temporaries.
+ struct A { int &&r = 0; }; // expected-note 2{{initializing field 'r' with default member initializer}}
struct B { A x, y; };
- B b = {};
+ B b = {}; // expected-warning 2{{lifetime extension of temporary created by aggregate initialization using a default member initializer is not yet supported}}
}
namespace pr33140_3 {
diff --git a/clang/test/SemaCXX/nullptr_in_arithmetic_ops.cpp b/clang/test/SemaCXX/nullptr_in_arithmetic_ops.cpp
index 6273d9c..98bec18 100644
--- a/clang/test/SemaCXX/nullptr_in_arithmetic_ops.cpp
+++ b/clang/test/SemaCXX/nullptr_in_arithmetic_ops.cpp
@@ -1,4 +1,5 @@
// RUN: %clang_cc1 -fsyntax-only -Wno-tautological-pointer-compare -fblocks -std=c++11 -verify %s
+// RUN: %clang_cc1 -fsyntax-only -Wno-tautological-pointer-compare -fblocks -std=c++11 -verify %s -fexperimental-new-constant-interpreter
void foo() {
int a;
diff --git a/clang/test/SemaObjCXX/arc-type-traits.mm b/clang/test/SemaObjCXX/arc-type-traits.mm
index 2d30ae4..25bc8b36 100644
--- a/clang/test/SemaObjCXX/arc-type-traits.mm
+++ b/clang/test/SemaObjCXX/arc-type-traits.mm
@@ -221,3 +221,12 @@ TRAIT_IS_TRUE(__is_trivially_relocatable, __unsafe_unretained id);
TRAIT_IS_TRUE(__is_trivially_relocatable, HasStrong);
TRAIT_IS_FALSE(__is_trivially_relocatable, HasWeak);
TRAIT_IS_TRUE(__is_trivially_relocatable, HasUnsafeUnretained);
+
+// __is_bitwise_cloneable
+TRAIT_IS_FALSE(__is_bitwise_cloneable, __strong id);
+TRAIT_IS_FALSE(__is_bitwise_cloneable, __weak id);
+TRAIT_IS_FALSE(__is_bitwise_cloneable, __autoreleasing id);
+TRAIT_IS_TRUE(__is_bitwise_cloneable, __unsafe_unretained id);
+TRAIT_IS_FALSE(__is_bitwise_cloneable, HasStrong);
+TRAIT_IS_FALSE(__is_bitwise_cloneable, HasWeak);
+TRAIT_IS_TRUE(__is_bitwise_cloneable, HasUnsafeUnretained);
diff --git a/clang/test/SemaOpenACC/loop-construct-auto_seq_independent-clauses.c b/clang/test/SemaOpenACC/loop-construct-auto_seq_independent-clauses.c
index 23f852e..ac61976 100644
--- a/clang/test/SemaOpenACC/loop-construct-auto_seq_independent-clauses.c
+++ b/clang/test/SemaOpenACC/loop-construct-auto_seq_independent-clauses.c
@@ -106,7 +106,6 @@ void uses() {
// expected-error@+1{{OpenACC 'present' clause is not valid on 'loop' directive}}
#pragma acc loop auto present(Var)
for(;;);
- // expected-warning@+1{{OpenACC clause 'private' not yet implemented}}
#pragma acc loop auto private(Var)
for(;;);
// expected-error@+1{{OpenACC 'copyout' clause is not valid on 'loop' directive}}
@@ -246,7 +245,6 @@ void uses() {
// expected-error@+1{{OpenACC 'present' clause is not valid on 'loop' directive}}
#pragma acc loop present(Var) auto
for(;;);
- // expected-warning@+1{{OpenACC clause 'private' not yet implemented}}
#pragma acc loop private(Var) auto
for(;;);
// expected-error@+1{{OpenACC 'copyout' clause is not valid on 'loop' directive}}
@@ -387,7 +385,6 @@ void uses() {
// expected-error@+1{{OpenACC 'present' clause is not valid on 'loop' directive}}
#pragma acc loop independent present(Var)
for(;;);
- // expected-warning@+1{{OpenACC clause 'private' not yet implemented}}
#pragma acc loop independent private(Var)
for(;;);
// expected-error@+1{{OpenACC 'copyout' clause is not valid on 'loop' directive}}
@@ -527,7 +524,6 @@ void uses() {
// expected-error@+1{{OpenACC 'present' clause is not valid on 'loop' directive}}
#pragma acc loop present(Var) independent
for(;;);
- // expected-warning@+1{{OpenACC clause 'private' not yet implemented}}
#pragma acc loop private(Var) independent
for(;;);
// expected-error@+1{{OpenACC 'copyout' clause is not valid on 'loop' directive}}
@@ -677,7 +673,6 @@ void uses() {
// expected-error@+1{{OpenACC 'present' clause is not valid on 'loop' directive}}
#pragma acc loop seq present(Var)
for(;;);
- // expected-warning@+1{{OpenACC clause 'private' not yet implemented}}
#pragma acc loop seq private(Var)
for(;;);
// expected-error@+1{{OpenACC 'copyout' clause is not valid on 'loop' directive}}
@@ -826,7 +821,6 @@ void uses() {
// expected-error@+1{{OpenACC 'present' clause is not valid on 'loop' directive}}
#pragma acc loop present(Var) seq
for(;;);
- // expected-warning@+1{{OpenACC clause 'private' not yet implemented}}
#pragma acc loop private(Var) seq
for(;;);
// expected-error@+1{{OpenACC 'copyout' clause is not valid on 'loop' directive}}
diff --git a/clang/test/SemaOpenACC/loop-construct-private-clause.c b/clang/test/SemaOpenACC/loop-construct-private-clause.c
new file mode 100644
index 0000000..f3ffdfb
--- /dev/null
+++ b/clang/test/SemaOpenACC/loop-construct-private-clause.c
@@ -0,0 +1,132 @@
+// RUN: %clang_cc1 %s -fopenacc -verify
+
+struct Incomplete;
+enum SomeE{ A };
+typedef struct IsComplete {
+ struct S { int A; } CompositeMember;
+ int ScalarMember;
+ float ArrayMember[5];
+ enum SomeE EnumMember;
+ void *PointerMember;
+} Complete;
+
+int GlobalInt;
+float GlobalArray[5];
+short *GlobalPointer;
+Complete GlobalComposite;
+
+void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete CompositeParam) {
+ int LocalInt;
+ short *LocalPointer;
+ float LocalArray[5];
+ Complete LocalComposite;
+
+ // Check Appertainment:
+#pragma acc loop private(LocalInt)
+ for(;;);
+
+ // Valid cases:
+#pragma acc loop private(LocalInt, LocalPointer, LocalArray)
+ for(;;);
+#pragma acc loop private(LocalArray)
+ for(;;);
+#pragma acc loop private(LocalArray[:])
+ for(;;);
+#pragma acc loop private(LocalArray[:5])
+ for(;;);
+#pragma acc loop private(LocalArray[2:])
+ for(;;);
+#pragma acc loop private(LocalArray[2:1])
+ for(;;);
+#pragma acc loop private(LocalArray[2])
+ for(;;);
+#pragma acc loop private(LocalComposite)
+ for(;;);
+#pragma acc loop private(LocalComposite.EnumMember)
+ for(;;);
+#pragma acc loop private(LocalComposite.ScalarMember)
+ for(;;);
+#pragma acc loop private(LocalComposite.ArrayMember)
+ for(;;);
+#pragma acc loop private(LocalComposite.ArrayMember[5])
+ for(;;);
+#pragma acc loop private(LocalComposite.PointerMember)
+ for(;;);
+#pragma acc loop private(GlobalInt, GlobalArray, GlobalPointer, GlobalComposite)
+ for(;;);
+#pragma acc loop private(GlobalArray[2], GlobalPointer[2], GlobalComposite.CompositeMember.A)
+ for(;;);
+#pragma acc loop private(LocalComposite, GlobalComposite)
+ for(;;);
+#pragma acc loop private(IntParam, PointerParam, ArrayParam, CompositeParam)
+ for(;;);
+#pragma acc loop private(PointerParam[IntParam], ArrayParam[IntParam], CompositeParam.CompositeMember.A)
+ for(;;);
+
+#pragma acc loop private(LocalArray) private(LocalArray[2])
+ for(;;);
+
+#pragma acc loop private(LocalArray, LocalArray[2])
+ for(;;);
+
+#pragma acc loop private(LocalComposite, LocalComposite.ScalarMember)
+ for(;;);
+
+#pragma acc loop private(LocalComposite.CompositeMember.A, LocalComposite.ScalarMember)
+ for(;;);
+
+#pragma acc loop private(LocalComposite.CompositeMember.A) private(LocalComposite.ScalarMember)
+ for(;;);
+
+ Complete LocalComposite2;
+#pragma acc loop private(LocalComposite2.ScalarMember, LocalComposite2.ScalarMember)
+ for(;;);
+
+ // Invalid cases, arbitrary expressions.
+ struct Incomplete *I;
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
+#pragma acc loop private(*I)
+ for(;;);
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
+#pragma acc loop private(GlobalInt + IntParam)
+ for(;;);
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
+#pragma acc loop private(+GlobalInt)
+ for(;;);
+
+ // expected-error@+1{{OpenACC sub-array length is unspecified and cannot be inferred because the subscripted value is not an array}}
+#pragma acc loop private(PointerParam[:])
+ for(;;);
+#pragma acc loop private(PointerParam[:5])
+ for(;;);
+#pragma acc loop private(PointerParam[:IntParam])
+ for(;;);
+ // expected-error@+1{{OpenACC sub-array length is unspecified and cannot be inferred because the subscripted value is not an array}}
+#pragma acc loop private(PointerParam[2:])
+ for(;;);
+#pragma acc loop private(PointerParam[2:5])
+ for(;;);
+#pragma acc loop private(PointerParam[2])
+ for(;;);
+#pragma acc loop private(ArrayParam[:])
+ for(;;);
+#pragma acc loop private(ArrayParam[:5])
+ for(;;);
+#pragma acc loop private(ArrayParam[:IntParam])
+ for(;;);
+#pragma acc loop private(ArrayParam[2:])
+ for(;;);
+ // expected-error@+1{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
+#pragma acc loop private(ArrayParam[2:5])
+ for(;;);
+#pragma acc loop private(ArrayParam[2])
+ for(;;);
+
+ // expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
+#pragma acc loop private((float*)ArrayParam[2:5])
+ for(;;);
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
+#pragma acc loop private((float)ArrayParam[2])
+ for(;;);
+}
diff --git a/clang/test/SemaOpenACC/loop-construct-private-clause.cpp b/clang/test/SemaOpenACC/loop-construct-private-clause.cpp
new file mode 100644
index 0000000..b5d3fc9
--- /dev/null
+++ b/clang/test/SemaOpenACC/loop-construct-private-clause.cpp
@@ -0,0 +1,155 @@
+// RUN: %clang_cc1 %s -fopenacc -verify
+
+struct Incomplete;
+enum SomeE{};
+typedef struct IsComplete {
+ struct S { int A; } CompositeMember;
+ int ScalarMember;
+ float ArrayMember[5];
+ SomeE EnumMember;
+ char *PointerMember;
+} Complete;
+
+int GlobalInt;
+float GlobalArray[5];
+char *GlobalPointer;
+Complete GlobalComposite;
+
+void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete CompositeParam, int &IntParamRef) {
+ int LocalInt;
+ char *LocalPointer;
+ float LocalArray[5];
+ Complete LocalComposite;
+
+ // Check Appertainment:
+
+#pragma acc loop private(LocalInt)
+ for(;;);
+
+ // Valid cases:
+#pragma acc loop private(LocalInt, LocalPointer, LocalArray)
+ for(;;);
+#pragma acc loop private(LocalArray)
+ for(;;);
+#pragma acc loop private(LocalArray[2])
+ for(;;);
+#pragma acc loop private(LocalComposite)
+ for(;;);
+#pragma acc loop private(LocalComposite.EnumMember)
+ for(;;);
+#pragma acc loop private(LocalComposite.ScalarMember)
+ for(;;);
+#pragma acc loop private(LocalComposite.ArrayMember)
+ for(;;);
+#pragma acc loop private(LocalComposite.ArrayMember[5])
+ for(;;);
+#pragma acc loop private(LocalComposite.PointerMember)
+ for(;;);
+#pragma acc loop private(GlobalInt, GlobalArray, GlobalPointer, GlobalComposite)
+ for(;;);
+#pragma acc loop private(GlobalArray[2], GlobalPointer[2], GlobalComposite.CompositeMember.A)
+ for(;;);
+#pragma acc loop private(LocalComposite, GlobalComposite)
+ for(;;);
+#pragma acc loop private(IntParam, PointerParam, ArrayParam, CompositeParam) private(IntParamRef)
+ for(;;);
+#pragma acc loop private(PointerParam[IntParam], ArrayParam[IntParam], CompositeParam.CompositeMember.A)
+ for(;;);
+
+
+ // Invalid cases, arbitrary expressions.
+ Incomplete *I;
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
+#pragma acc loop private(*I)
+ for(;;);
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
+#pragma acc loop private(GlobalInt + IntParam)
+ for(;;);
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
+#pragma acc loop private(+GlobalInt)
+ for(;;);
+}
+
+template<typename T, unsigned I, typename V>
+void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
+#pragma acc loop private(+t)
+ for(;;);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
+#pragma acc loop private(+I)
+ for(;;);
+
+ // NTTPs are only valid if they are a reference to something.
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
+ // expected-note@#TEMPL_USES_INST{{in instantiation of}}
+#pragma acc loop private(I)
+ for(;;);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
+#pragma acc loop private(t, I)
+ for(;;);
+
+#pragma acc loop private(arrayT)
+ for(;;);
+
+#pragma acc loop private(TemplComp)
+ for(;;);
+
+#pragma acc loop private(TemplComp.PointerMember[5])
+ for(;;);
+
+#pragma acc loop private(TemplComp.PointerMember[5]) private(TemplComp)
+ for(;;);
+
+ int *Pointer;
+#pragma acc loop private(Pointer[:I])
+ for(;;);
+#pragma acc loop private(Pointer[:t])
+ for(;;);
+ // expected-error@+1{{OpenACC sub-array length is unspecified and cannot be inferred because the subscripted value is not an array}}
+#pragma acc loop private(Pointer[1:])
+ for(;;);
+}
+
+template<unsigned I, auto &NTTP_REF>
+void NTTP() {
+ // NTTPs are only valid if they are a reference to something.
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
+ // expected-note@#NTTP_INST{{in instantiation of}}
+#pragma acc loop private(I)
+ for(;;);
+
+#pragma acc loop private(NTTP_REF)
+ for(;;);
+}
+
+struct S {
+ int ThisMember;
+ int ThisMemberArray[5];
+
+ void foo();
+};
+
+void S::foo() {
+#pragma acc loop private(ThisMember, this->ThisMemberArray[1])
+ for(;;);
+
+#pragma acc loop private(ThisMemberArray[1:2])
+ for(;;);
+
+#pragma acc loop private(this)
+ for(;;);
+
+#pragma acc loop private(ThisMember, this->ThisMember)
+ for(;;);
+}
+
+void Inst() {
+ static constexpr int NTTP_REFed = 1;
+ int i;
+ int Arr[5];
+ Complete C;
+ TemplUses(i, Arr, C); // #TEMPL_USES_INST
+ NTTP<5, NTTP_REFed>(); // #NTTP_INST
+}
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-gfx940-err.cl b/clang/test/SemaOpenCL/builtins-amdgcn-gfx940-err.cl
index 487cc53..2a1ba43 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-gfx940-err.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-gfx940-err.cl
@@ -3,8 +3,10 @@
typedef unsigned int u32;
-void test_global_load_lds_unsupported_size(global u32* src, local u32 *dst, u32 size) {
- __builtin_amdgcn_global_load_lds(src, dst, size, /*offset=*/0, /*aux=*/0); // expected-error{{expression is not an integer constant expression}}
+void test_global_load_lds_unsupported_size(global u32* src, local u32 *dst, u32 size, u32 offset, u32 aux) {
+ __builtin_amdgcn_global_load_lds(src, dst, size, /*offset=*/0, /*aux=*/0); // expected-error{{argument to '__builtin_amdgcn_global_load_lds' must be a constant integer}}
+ __builtin_amdgcn_global_load_lds(src, dst, /*size=*/4, offset, /*aux=*/0); // expected-error{{argument to '__builtin_amdgcn_global_load_lds' must be a constant integer}}
+ __builtin_amdgcn_global_load_lds(src, dst, /*size=*/4, /*offset=*/0, aux); // expected-error{{argument to '__builtin_amdgcn_global_load_lds' must be a constant integer}}
__builtin_amdgcn_global_load_lds(src, dst, /*size=*/5, /*offset=*/0, /*aux=*/0); // expected-error{{invalid size value}} expected-note {{size must be 1, 2, or 4}}
__builtin_amdgcn_global_load_lds(src, dst, /*size=*/0, /*offset=*/0, /*aux=*/0); // expected-error{{invalid size value}} expected-note {{size must be 1, 2, or 4}}
__builtin_amdgcn_global_load_lds(src, dst, /*size=*/3, /*offset=*/0, /*aux=*/0); // expected-error{{invalid size value}} expected-note {{size must be 1, 2, or 4}}
diff --git a/clang/unittests/AST/Interp/toAPValue.cpp b/clang/unittests/AST/Interp/toAPValue.cpp
index e56453a..d6879d6 100644
--- a/clang/unittests/AST/Interp/toAPValue.cpp
+++ b/clang/unittests/AST/Interp/toAPValue.cpp
@@ -186,3 +186,49 @@ TEST(ToAPValue, FunctionPointersC) {
ASSERT_EQ(I, 17);
}
}
+
+TEST(ToAPValue, MemberPointers) {
+ constexpr char Code[] = "struct S {\n"
+ " int m, n;\n"
+ "};\n"
+ "constexpr int S::*pm = &S::m;\n"
+ "constexpr int S::*nn = nullptr;\n";
+
+ auto AST = tooling::buildASTFromCodeWithArgs(
+ Code, {"-fexperimental-new-constant-interpreter"});
+
+ auto &Ctx = AST->getASTContext().getInterpContext();
+ Program &Prog = Ctx.getProgram();
+
+ auto getDecl = [&](const char *Name) -> const ValueDecl * {
+ auto Nodes =
+ match(valueDecl(hasName(Name)).bind("var"), AST->getASTContext());
+ assert(Nodes.size() == 1);
+ const auto *D = Nodes[0].getNodeAs<ValueDecl>("var");
+ assert(D);
+ return D;
+ };
+
+ auto getGlobalPtr = [&](const char *Name) -> Pointer {
+ const VarDecl *D = cast<VarDecl>(getDecl(Name));
+ return Prog.getPtrGlobal(*Prog.getGlobal(D));
+ };
+
+ {
+ const Pointer &GP = getGlobalPtr("pm");
+ ASSERT_TRUE(GP.isLive());
+ const MemberPointer &FP = GP.deref<MemberPointer>();
+ APValue A = FP.toAPValue();
+ ASSERT_EQ(A.getMemberPointerDecl(), getDecl("m"));
+ ASSERT_EQ(A.getKind(), APValue::MemberPointer);
+ }
+
+ {
+ const Pointer &GP = getGlobalPtr("nn");
+ ASSERT_TRUE(GP.isLive());
+ const MemberPointer &NP = GP.deref<MemberPointer>();
+ ASSERT_TRUE(NP.isZero());
+ APValue A = NP.toAPValue();
+ ASSERT_EQ(A.getKind(), APValue::MemberPointer);
+ }
+}
diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp
index 004ecb6..4e42726 100644
--- a/clang/unittests/Format/FormatTest.cpp
+++ b/clang/unittests/Format/FormatTest.cpp
@@ -24879,7 +24879,7 @@ TEST_F(FormatTest, SkipMacroDefinitionBody) {
Style);
// With comments.
- verifyFormat("/* */ #define A a // a a", "/* */ # define A a // a a",
+ verifyFormat("/* */ #define A a // a a", "/* */ # define A a // a a",
Style);
verifyNoChange("/* */ #define A a // a a", Style);
@@ -24891,6 +24891,15 @@ TEST_F(FormatTest, SkipMacroDefinitionBody) {
"int aaa; // a",
Style);
+ verifyNoChange(
+ "#define MACRO_WITH_COMMENTS() \\\n"
+ " public: \\\n"
+ " /* Documentation parsed by Doxygen for the following method. */ \\\n"
+ " static MyType getClassTypeId(); \\\n"
+ " /** Normal comment for the following method. */ \\\n"
+ " virtual MyType getTypeId() const;",
+ Style);
+
// multiline macro definitions
verifyNoChange("#define A a\\\n"
" A a \\\n "
diff --git a/clang/unittests/Lex/DependencyDirectivesScannerTest.cpp b/clang/unittests/Lex/DependencyDirectivesScannerTest.cpp
index 044c3d6..59fef9e 100644
--- a/clang/unittests/Lex/DependencyDirectivesScannerTest.cpp
+++ b/clang/unittests/Lex/DependencyDirectivesScannerTest.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "clang/Lex/DependencyDirectivesScanner.h"
-#include "clang/Basic/TokenKinds.h"
#include "llvm/ADT/SmallString.h"
#include "gtest/gtest.h"
@@ -18,11 +17,11 @@ using namespace clang::dependency_directives_scan;
static bool minimizeSourceToDependencyDirectives(
StringRef Input, SmallVectorImpl<char> &Out,
SmallVectorImpl<dependency_directives_scan::Token> &Tokens,
- SmallVectorImpl<Directive> &Directives, const LangOptions &LangOpts) {
+ SmallVectorImpl<Directive> &Directives) {
Out.clear();
Tokens.clear();
Directives.clear();
- if (scanSourceForDependencyDirectives(Input, Tokens, Directives, LangOpts))
+ if (scanSourceForDependencyDirectives(Input, Tokens, Directives))
return true;
raw_svector_ostream OS(Out);
@@ -39,9 +38,7 @@ static bool minimizeSourceToDependencyDirectives(StringRef Input,
SmallVectorImpl<char> &Out) {
SmallVector<dependency_directives_scan::Token, 16> Tokens;
SmallVector<Directive, 32> Directives;
- LangOptions LangOpts;
- return minimizeSourceToDependencyDirectives(Input, Out, Tokens, Directives,
- LangOpts);
+ return minimizeSourceToDependencyDirectives(Input, Out, Tokens, Directives);
}
namespace {
@@ -50,17 +47,16 @@ TEST(MinimizeSourceToDependencyDirectivesTest, Empty) {
SmallVector<char, 128> Out;
SmallVector<dependency_directives_scan::Token, 4> Tokens;
SmallVector<Directive, 4> Directives;
- LangOptions LangOpts;
- ASSERT_FALSE(minimizeSourceToDependencyDirectives("", Out, Tokens, Directives,
- LangOpts));
+ ASSERT_FALSE(
+ minimizeSourceToDependencyDirectives("", Out, Tokens, Directives));
EXPECT_TRUE(Out.empty());
EXPECT_TRUE(Tokens.empty());
ASSERT_EQ(1u, Directives.size());
ASSERT_EQ(pp_eof, Directives.back().Kind);
ASSERT_FALSE(minimizeSourceToDependencyDirectives("abc def\nxyz", Out, Tokens,
- Directives, LangOpts));
+ Directives));
EXPECT_STREQ("<TokBeforeEOF>\n", Out.data());
EXPECT_TRUE(Tokens.empty());
ASSERT_EQ(2u, Directives.size());
@@ -72,7 +68,6 @@ TEST(MinimizeSourceToDependencyDirectivesTest, AllTokens) {
SmallVector<char, 128> Out;
SmallVector<dependency_directives_scan::Token, 4> Tokens;
SmallVector<Directive, 4> Directives;
- LangOptions LangOpts;
ASSERT_FALSE(
minimizeSourceToDependencyDirectives("#define A\n"
@@ -97,7 +92,7 @@ TEST(MinimizeSourceToDependencyDirectivesTest, AllTokens) {
"export module m;\n"
"import m;\n"
"#pragma clang system_header\n",
- Out, Tokens, Directives, LangOpts));
+ Out, Tokens, Directives));
EXPECT_EQ(pp_define, Directives[0].Kind);
EXPECT_EQ(pp_undef, Directives[1].Kind);
EXPECT_EQ(pp_endif, Directives[2].Kind);
@@ -150,10 +145,9 @@ TEST(MinimizeSourceToDependencyDirectivesTest, Define) {
SmallVector<char, 128> Out;
SmallVector<dependency_directives_scan::Token, 4> Tokens;
SmallVector<Directive, 4> Directives;
- LangOptions LangOpts;
- ASSERT_FALSE(minimizeSourceToDependencyDirectives(
- "#define MACRO", Out, Tokens, Directives, LangOpts));
+ ASSERT_FALSE(minimizeSourceToDependencyDirectives("#define MACRO", Out,
+ Tokens, Directives));
EXPECT_STREQ("#define MACRO\n", Out.data());
ASSERT_EQ(4u, Tokens.size());
ASSERT_EQ(2u, Directives.size());
@@ -844,7 +838,6 @@ TEST(MinimizeSourceToDependencyDirectivesTest, PragmaOnce) {
SmallVector<char, 128> Out;
SmallVector<dependency_directives_scan::Token, 4> Tokens;
SmallVector<Directive, 4> Directives;
- LangOptions LangOpts;
StringRef Source = R"(// comment
#pragma once
@@ -852,8 +845,8 @@ TEST(MinimizeSourceToDependencyDirectivesTest, PragmaOnce) {
#include <test.h>
_Pragma("once")
)";
- ASSERT_FALSE(minimizeSourceToDependencyDirectives(Source, Out, Tokens,
- Directives, LangOpts));
+ ASSERT_FALSE(
+ minimizeSourceToDependencyDirectives(Source, Out, Tokens, Directives));
EXPECT_STREQ("#pragma once\n#include <test.h>\n_Pragma(\"once\")\n",
Out.data());
ASSERT_EQ(Directives.size(), 4u);
@@ -933,7 +926,6 @@ TEST(MinimizeSourceToDependencyDirectivesTest, CxxModules) {
SmallVector<char, 128> Out;
SmallVector<dependency_directives_scan::Token, 4> Tokens;
SmallVector<Directive, 4> Directives;
- LangOptions LangOpts;
StringRef Source = R"(
module;
@@ -962,8 +954,8 @@ ort \
import f(->a = 3);
}
)";
- ASSERT_FALSE(minimizeSourceToDependencyDirectives(Source, Out, Tokens,
- Directives, LangOpts));
+ ASSERT_FALSE(
+ minimizeSourceToDependencyDirectives(Source, Out, Tokens, Directives));
EXPECT_STREQ("#include \"textual-header.h\"\nexport module m;"
"exp\\\nort import:l[[rename]];"
"import<<=3;import a b d e d e f e;"
@@ -1020,52 +1012,4 @@ TEST(MinimizeSourceToDependencyDirectivesTest, TokensBeforeEOF) {
EXPECT_STREQ("#ifndef A\n#define A\n#endif\n<TokBeforeEOF>\n", Out.data());
}
-TEST(MinimizeSourceToDependencyDirectivesTest, CPlusPlus14PPNumber) {
- SmallVector<char, 128> Out;
- SmallVector<dependency_directives_scan::Token, 4> Tokens;
- SmallVector<Directive, 4> Directives;
- LangOptions LangOpts;
-
- StringRef Source = R"(
-#if 123'124
-#endif
-)";
-
- LangOpts.CPlusPlus14 = true;
- ASSERT_FALSE(minimizeSourceToDependencyDirectives(Source, Out, Tokens,
- Directives, LangOpts));
- EXPECT_STREQ("#if 123'124\n#endif\n", Out.data());
- ASSERT_EQ(Directives.size(), 3u);
- EXPECT_EQ(Directives[0].Kind, dependency_directives_scan::pp_if);
- EXPECT_EQ(Directives[1].Kind, dependency_directives_scan::pp_endif);
- EXPECT_EQ(Directives[2].Kind, dependency_directives_scan::pp_eof);
- ASSERT_EQ(Tokens.size(), 7u);
-
- ASSERT_TRUE(Tokens[0].is(tok::hash));
- ASSERT_TRUE(Tokens[1].is(tok::raw_identifier)); // "if"
- ASSERT_TRUE(Tokens[2].is(tok::numeric_constant)); // 123'124
- ASSERT_TRUE(Tokens[3].is(tok::eod));
- ASSERT_TRUE(Tokens[4].is(tok::hash));
- ASSERT_TRUE(Tokens[5].is(tok::raw_identifier)); // #endif
- ASSERT_TRUE(Tokens[6].is(tok::eod));
-
- LangOpts.CPlusPlus14 = false;
- ASSERT_FALSE(minimizeSourceToDependencyDirectives(Source, Out, Tokens,
- Directives, LangOpts));
- EXPECT_STREQ("#if 123'124\n#endif\n", Out.data());
- ASSERT_EQ(Directives.size(), 3u);
- EXPECT_EQ(Directives[0].Kind, dependency_directives_scan::pp_if);
- EXPECT_EQ(Directives[1].Kind, dependency_directives_scan::pp_endif);
- EXPECT_EQ(Directives[2].Kind, dependency_directives_scan::pp_eof);
- ASSERT_EQ(Tokens.size(), 8u);
- ASSERT_TRUE(Tokens[0].is(tok::hash));
- ASSERT_TRUE(Tokens[1].is(tok::raw_identifier)); // "if"
- ASSERT_TRUE(Tokens[2].is(tok::numeric_constant)); // 123
- ASSERT_TRUE(Tokens[3].is(tok::unknown)); // '124
- ASSERT_TRUE(Tokens[4].is(tok::eod));
- ASSERT_TRUE(Tokens[5].is(tok::hash));
- ASSERT_TRUE(Tokens[6].is(tok::raw_identifier)); // #endif
- ASSERT_TRUE(Tokens[7].is(tok::eod));
-}
-
} // end anonymous namespace
diff --git a/clang/unittests/Lex/PPDependencyDirectivesTest.cpp b/clang/unittests/Lex/PPDependencyDirectivesTest.cpp
index 410f378..6ff87f7 100644
--- a/clang/unittests/Lex/PPDependencyDirectivesTest.cpp
+++ b/clang/unittests/Lex/PPDependencyDirectivesTest.cpp
@@ -104,7 +104,6 @@ TEST_F(PPDependencyDirectivesTest, MacroGuard) {
SmallVector<dependency_directives_scan::Directive> Directives;
};
SmallVector<std::unique_ptr<DepDirectives>> DepDirectivesObjects;
- LangOptions LangOpts;
auto getDependencyDirectives = [&](FileEntryRef File)
-> std::optional<ArrayRef<dependency_directives_scan::Directive>> {
@@ -112,7 +111,7 @@ TEST_F(PPDependencyDirectivesTest, MacroGuard) {
StringRef Input = (*FileMgr.getBufferForFile(File))->getBuffer();
bool Err = scanSourceForDependencyDirectives(
Input, DepDirectivesObjects.back()->Tokens,
- DepDirectivesObjects.back()->Directives, LangOpts);
+ DepDirectivesObjects.back()->Directives);
EXPECT_FALSE(Err);
return llvm::ArrayRef(DepDirectivesObjects.back()->Directives);
};
diff --git a/clang/www/cxx_dr_status.html b/clang/www/cxx_dr_status.html
index b046468..4385744 100755
--- a/clang/www/cxx_dr_status.html
+++ b/clang/www/cxx_dr_status.html
@@ -10698,7 +10698,7 @@ and <I>POD class</I></td>
<td><a href="https://cplusplus.github.io/CWG/issues/1815.html">1815</a></td>
<td>CD4</td>
<td>Lifetime extension in aggregate initialization</td>
- <td class="unreleased" align="center">Clang 19</td>
+ <td class="none" align="center">No</td>
</tr>
<tr id="1816">
<td><a href="https://cplusplus.github.io/CWG/issues/1816.html">1816</a></td>