Diffstat (limited to 'clang/lib')
 clang/lib/AST/ByteCode/Interp.cpp                 | 32
 clang/lib/AST/ByteCode/InterpBuiltin.cpp          | 85
 clang/lib/AST/ExprConstant.cpp                    | 83
 clang/lib/Basic/SourceManager.cpp                 |  3
 clang/lib/Basic/Targets/BPF.cpp                   |  1
 clang/lib/CIR/CodeGen/CIRGenBuilder.h             | 34
 clang/lib/CIR/CodeGen/CIRGenClass.cpp             |  4
 clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp         |  8
 clang/lib/CIR/CodeGen/CIRGenDecl.cpp              |  4
 clang/lib/CIR/CodeGen/CIRGenExpr.cpp              |  2
 clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp     |  8
 clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp           |  4
 clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp      |  2
 clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp        | 18
 clang/lib/CIR/CodeGen/CIRGenFunction.cpp          |  8
 clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp     | 10
 clang/lib/CIR/CodeGen/CIRGenModule.cpp            | 48
 clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp     |  6
 clang/lib/CIR/CodeGen/CIRGenTypeCache.h           | 48
 clang/lib/CIR/CodeGen/CIRGenTypes.cpp             | 32
 clang/lib/CIR/Dialect/IR/CIRDialect.cpp           | 23
 clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp   |  4
 clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp   | 13
 clang/lib/CodeGen/BackendUtil.cpp                 | 28
 clang/lib/CodeGen/CGBuiltin.cpp                   |  9
 clang/lib/CodeGen/CGDebugInfo.cpp                 |  5
 clang/lib/CodeGen/CGExpr.cpp                      |  8
 clang/lib/CodeGen/CGHLSLBuiltins.cpp              | 32
 clang/lib/Driver/Driver.cpp                       | 15
 clang/lib/Driver/ToolChains/Clang.cpp             | 77
 clang/lib/Driver/ToolChains/Fuchsia.cpp           |  4
 clang/lib/Format/TokenAnnotator.cpp               |  5
 clang/lib/Frontend/TextDiagnostic.cpp             | 10
 clang/lib/Headers/avx512vlbwintrin.h              | 20
 clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h    | 123
 clang/lib/Interpreter/InterpreterValuePrinter.cpp |  5
 clang/lib/Sema/SemaConcept.cpp                    |  9
 clang/lib/Sema/SemaHLSL.cpp                       |  1
 38 files changed, 603 insertions(+), 228 deletions(-)
diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp
index d640be0..a2fb0fb 100644
--- a/clang/lib/AST/ByteCode/Interp.cpp
+++ b/clang/lib/AST/ByteCode/Interp.cpp
@@ -1651,8 +1651,8 @@ static bool GetDynamicDecl(InterpState &S, CodePtr OpPC, Pointer TypePtr,
QualType DynamicType = TypePtr.getType();
if (TypePtr.isStatic() || TypePtr.isConst()) {
- const VarDecl *VD = TypePtr.getDeclDesc()->asVarDecl();
- if (!VD->isConstexpr()) {
+ if (const VarDecl *VD = TypePtr.getDeclDesc()->asVarDecl();
+ VD && !VD->isConstexpr()) {
const Expr *E = S.Current->getExpr(OpPC);
APValue V = TypePtr.toAPValue(S.getASTContext());
QualType TT = S.getASTContext().getLValueReferenceType(DynamicType);
@@ -1683,20 +1683,6 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset);
const FunctionDecl *Callee = Func->getDecl();
- if (!Func->isFullyCompiled())
- compileFunction(S, Func);
-
- // C++2a [class.abstract]p6:
- // the effect of making a virtual call to a pure virtual function [...] is
- // undefined
- if (Callee->isPureVirtual()) {
- S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_pure_virtual_call,
- 1)
- << Callee;
- S.Note(Callee->getLocation(), diag::note_declared_at);
- return false;
- }
-
const CXXRecordDecl *DynamicDecl = nullptr;
if (!GetDynamicDecl(S, OpPC, ThisPtr, DynamicDecl))
return false;
@@ -1706,7 +1692,8 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
const auto *InitialFunction = cast<CXXMethodDecl>(Callee);
const CXXMethodDecl *Overrider;
- if (StaticDecl != DynamicDecl) {
+ if (StaticDecl != DynamicDecl &&
+ !llvm::is_contained(S.InitializingBlocks, ThisPtr.block())) {
if (!DynamicDecl->isDerivedFrom(StaticDecl))
return false;
Overrider = S.getContext().getOverridingFunction(DynamicDecl, StaticDecl,
@@ -1716,6 +1703,17 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
Overrider = InitialFunction;
}
+ // C++2a [class.abstract]p6:
+ // the effect of making a virtual call to a pure virtual function [...] is
+ // undefined
+ if (Overrider->isPureVirtual()) {
+ S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_pure_virtual_call,
+ 1)
+ << Callee;
+ S.Note(Callee->getLocation(), diag::note_declared_at);
+ return false;
+ }
+
if (Overrider != InitialFunction) {
// DR1872: An instantiated virtual constexpr function can't be called in a
// constant expression (prior to C++20). We can still constant-fold such a
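The net effect: virtual dispatch now resolves the overrider first (using the object's static type while its block is still being initialized, per [class.cdtor]), and the pure-virtual diagnostic fires on the resolved target. A minimal sketch of code this affects (hypothetical test, not from the patch):

    struct Base {
      int v;
      constexpr Base() : v(id()) {} // virtual call during construction
      constexpr virtual int id() const { return 1; }
    };
    struct Derived : Base {
      constexpr int id() const override { return 2; }
    };
    // Inside Base(), dispatch must use Base::id, not Derived::id.
    static_assert(Derived{}.v == 1);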
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 8f23001..b3ab82d 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -859,7 +859,7 @@ static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
APSInt RHS = popToAPSInt(S.Stk, RHST);
APSInt LHS = popToAPSInt(S.Stk, LHST);
- if (CarryOutPtr.isDummy())
+ if (CarryOutPtr.isDummy() || !CarryOutPtr.isBlockPointer())
return false;
APSInt CarryOut;
@@ -3296,6 +3296,60 @@ static bool interp__builtin_vec_set(InterpState &S, CodePtr OpPC,
return true;
}
+static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B,
+ bool IsUnsigned) {
+ switch (Imm & 0x7) {
+ case 0x00: // _MM_CMPINT_EQ
+ return (A == B);
+ case 0x01: // _MM_CMPINT_LT
+ return IsUnsigned ? A.ult(B) : A.slt(B);
+ case 0x02: // _MM_CMPINT_LE
+ return IsUnsigned ? A.ule(B) : A.sle(B);
+ case 0x03: // _MM_CMPINT_FALSE
+ return false;
+ case 0x04: // _MM_CMPINT_NE
+ return (A != B);
+ case 0x05: // _MM_CMPINT_NLT
+ return IsUnsigned ? A.ugt(B) : A.sgt(B);
+ case 0x06: // _MM_CMPINT_NLE
+ return IsUnsigned ? A.uge(B) : A.sge(B);
+ case 0x07: // _MM_CMPINT_TRUE
+ return true;
+ default:
+ llvm_unreachable("Invalid Op");
+ }
+}
+
+static bool interp__builtin_ia32_cmp_mask(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call, unsigned ID,
+ bool IsUnsigned) {
+ assert(Call->getNumArgs() == 4);
+
+ APSInt Mask = popToAPSInt(S, Call->getArg(3));
+ APSInt Opcode = popToAPSInt(S, Call->getArg(2));
+ unsigned CmpOp = static_cast<unsigned>(Opcode.getZExtValue());
+ const Pointer &RHS = S.Stk.pop<Pointer>();
+ const Pointer &LHS = S.Stk.pop<Pointer>();
+
+ assert(LHS.getNumElems() == RHS.getNumElems());
+
+ APInt RetMask = APInt::getZero(LHS.getNumElems());
+ unsigned VectorLen = LHS.getNumElems();
+ PrimType ElemT = LHS.getFieldDesc()->getPrimType();
+
+ for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
+ APSInt A, B;
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ A = LHS.elem<T>(ElemNum).toAPSInt();
+ B = RHS.elem<T>(ElemNum).toAPSInt();
+ });
+ RetMask.setBitVal(ElemNum,
+ Mask[ElemNum] && evalICmpImm(CmpOp, A, B, IsUnsigned));
+ }
+ pushInteger(S, RetMask, Call->getType());
+ return true;
+}
+
static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
const CallExpr *Call) {
assert(Call->getNumArgs() == 1);
@@ -4488,6 +4542,35 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_vec_set_v4di:
return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
+ case X86::BI__builtin_ia32_cmpb128_mask:
+ case X86::BI__builtin_ia32_cmpw128_mask:
+ case X86::BI__builtin_ia32_cmpd128_mask:
+ case X86::BI__builtin_ia32_cmpq128_mask:
+ case X86::BI__builtin_ia32_cmpb256_mask:
+ case X86::BI__builtin_ia32_cmpw256_mask:
+ case X86::BI__builtin_ia32_cmpd256_mask:
+ case X86::BI__builtin_ia32_cmpq256_mask:
+ case X86::BI__builtin_ia32_cmpb512_mask:
+ case X86::BI__builtin_ia32_cmpw512_mask:
+ case X86::BI__builtin_ia32_cmpd512_mask:
+ case X86::BI__builtin_ia32_cmpq512_mask:
+ return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
+ /*IsUnsigned=*/false);
+
+ case X86::BI__builtin_ia32_ucmpb128_mask:
+ case X86::BI__builtin_ia32_ucmpw128_mask:
+ case X86::BI__builtin_ia32_ucmpd128_mask:
+ case X86::BI__builtin_ia32_ucmpq128_mask:
+ case X86::BI__builtin_ia32_ucmpb256_mask:
+ case X86::BI__builtin_ia32_ucmpw256_mask:
+ case X86::BI__builtin_ia32_ucmpd256_mask:
+ case X86::BI__builtin_ia32_ucmpq256_mask:
+ case X86::BI__builtin_ia32_ucmpb512_mask:
+ case X86::BI__builtin_ia32_ucmpw512_mask:
+ case X86::BI__builtin_ia32_ucmpd512_mask:
+ case X86::BI__builtin_ia32_ucmpq512_mask:
+ return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
+ /*IsUnsigned=*/true);
case X86::BI__builtin_ia32_pslldqi128_byteshift:
case X86::BI__builtin_ia32_pslldqi256_byteshift:
case X86::BI__builtin_ia32_pslldqi512_byteshift:
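For illustration, a constant expression the new handler can now fold (hypothetical test; assumes an AVX-512VL-enabled target and that the builtin is permitted in constant evaluation):

    typedef int v4si __attribute__((vector_size(16)));
    // Lane-wise signed '<' (_MM_CMPINT_LT == 1), all four lanes enabled:
    constexpr unsigned char k = __builtin_ia32_cmpd128_mask(
        (v4si){1, 2, 3, 4}, (v4si){4, 3, 2, 1}, /*imm=*/1, /*mask=*/0xF);
    static_assert(k == 0x3); // only lanes 0 and 1 satisfy a < b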
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 29ee089..d0404b9 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -15766,6 +15766,89 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
unsigned Idx = static_cast<unsigned>(IdxAPS.getZExtValue() & (N - 1));
return Success(Vec.getVectorElt(Idx).getInt(), E);
}
+
+ case clang::X86::BI__builtin_ia32_cmpb128_mask:
+ case clang::X86::BI__builtin_ia32_cmpw128_mask:
+ case clang::X86::BI__builtin_ia32_cmpd128_mask:
+ case clang::X86::BI__builtin_ia32_cmpq128_mask:
+ case clang::X86::BI__builtin_ia32_cmpb256_mask:
+ case clang::X86::BI__builtin_ia32_cmpw256_mask:
+ case clang::X86::BI__builtin_ia32_cmpd256_mask:
+ case clang::X86::BI__builtin_ia32_cmpq256_mask:
+ case clang::X86::BI__builtin_ia32_cmpb512_mask:
+ case clang::X86::BI__builtin_ia32_cmpw512_mask:
+ case clang::X86::BI__builtin_ia32_cmpd512_mask:
+ case clang::X86::BI__builtin_ia32_cmpq512_mask:
+ case clang::X86::BI__builtin_ia32_ucmpb128_mask:
+ case clang::X86::BI__builtin_ia32_ucmpw128_mask:
+ case clang::X86::BI__builtin_ia32_ucmpd128_mask:
+ case clang::X86::BI__builtin_ia32_ucmpq128_mask:
+ case clang::X86::BI__builtin_ia32_ucmpb256_mask:
+ case clang::X86::BI__builtin_ia32_ucmpw256_mask:
+ case clang::X86::BI__builtin_ia32_ucmpd256_mask:
+ case clang::X86::BI__builtin_ia32_ucmpq256_mask:
+ case clang::X86::BI__builtin_ia32_ucmpb512_mask:
+ case clang::X86::BI__builtin_ia32_ucmpw512_mask:
+ case clang::X86::BI__builtin_ia32_ucmpd512_mask:
+ case clang::X86::BI__builtin_ia32_ucmpq512_mask: {
+ assert(E->getNumArgs() == 4);
+
+ bool IsUnsigned =
+ (BuiltinOp >= clang::X86::BI__builtin_ia32_ucmpb128_mask &&
+ BuiltinOp <= clang::X86::BI__builtin_ia32_ucmpq512_mask);
+
+ APValue LHS, RHS;
+ APSInt Mask, Opcode;
+ if (!EvaluateVector(E->getArg(0), LHS, Info) ||
+ !EvaluateVector(E->getArg(1), RHS, Info) ||
+ !EvaluateInteger(E->getArg(2), Opcode, Info) ||
+ !EvaluateInteger(E->getArg(3), Mask, Info))
+ return false;
+
+ assert(LHS.getVectorLength() == RHS.getVectorLength());
+
+ unsigned VectorLen = LHS.getVectorLength();
+ unsigned RetWidth = Mask.getBitWidth();
+
+ APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);
+
+ for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
+ const APSInt &A = LHS.getVectorElt(ElemNum).getInt();
+ const APSInt &B = RHS.getVectorElt(ElemNum).getInt();
+ bool Result = false;
+
+ switch (Opcode.getExtValue() & 0x7) {
+ case 0: // _MM_CMPINT_EQ
+ Result = (A == B);
+ break;
+ case 1: // _MM_CMPINT_LT
+ Result = IsUnsigned ? A.ult(B) : A.slt(B);
+ break;
+ case 2: // _MM_CMPINT_LE
+ Result = IsUnsigned ? A.ule(B) : A.sle(B);
+ break;
+ case 3: // _MM_CMPINT_FALSE
+ Result = false;
+ break;
+ case 4: // _MM_CMPINT_NE
+ Result = (A != B);
+ break;
+ case 5: // _MM_CMPINT_NLT (>=)
+ Result = IsUnsigned ? A.uge(B) : A.sge(B);
+ break;
+ case 6: // _MM_CMPINT_NLE (>)
+ Result = IsUnsigned ? A.ugt(B) : A.sgt(B);
+ break;
+ case 7: // _MM_CMPINT_TRUE
+ Result = true;
+ break;
+ }
+
+ RetMask.setBitVal(ElemNum, Mask[ElemNum] && Result);
+ }
+
+ return Success(APValue(RetMask), E);
+ }
}
}
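The signed/unsigned split matters as soon as a lane holds a negative value; a sketch of the difference (hypothetical test, same assumptions as above):

    typedef int v4si __attribute__((vector_size(16)));
    // Signed: -1 < 1 holds; unsigned: 0xFFFFFFFF < 1 does not.
    constexpr unsigned char s = __builtin_ia32_cmpd128_mask(
        (v4si){-1, 0, 0, 0}, (v4si){1, 0, 0, 0}, /*imm=*/1, /*mask=*/0x1);
    constexpr unsigned char u = __builtin_ia32_ucmpd128_mask(
        (v4si){-1, 0, 0, 0}, (v4si){1, 0, 0, 0}, /*imm=*/1, /*mask=*/0x1);
    static_assert(s == 0x1 && u == 0x0);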
diff --git a/clang/lib/Basic/SourceManager.cpp b/clang/lib/Basic/SourceManager.cpp
index d8ec837..938c648 100644
--- a/clang/lib/Basic/SourceManager.cpp
+++ b/clang/lib/Basic/SourceManager.cpp
@@ -608,8 +608,7 @@ FileID SourceManager::createFileIDImpl(ContentCache &File, StringRef Filename,
return FileID::get(LoadedID);
}
unsigned FileSize = File.getSize();
- llvm::ErrorOr<bool> NeedConversion =
- llvm::needConversion(Filename.str().c_str());
+ llvm::ErrorOr<bool> NeedConversion = llvm::needConversion(Filename);
if (NeedConversion && *NeedConversion) {
// Buffer size may increase due to potential z/OS EBCDIC to UTF-8
// conversion.
diff --git a/clang/lib/Basic/Targets/BPF.cpp b/clang/lib/Basic/Targets/BPF.cpp
index 0411bcc..8de1083 100644
--- a/clang/lib/Basic/Targets/BPF.cpp
+++ b/clang/lib/Basic/Targets/BPF.cpp
@@ -75,6 +75,7 @@ void BPFTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__BPF_FEATURE_GOTOL");
Builder.defineMacro("__BPF_FEATURE_ST");
Builder.defineMacro("__BPF_FEATURE_LOAD_ACQ_STORE_REL");
+ Builder.defineMacro("__BPF_FEATURE_GOTOX");
}
}
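BPF programs can now feature-detect indirect-goto support at preprocessing time, for example:

    #ifdef __BPF_FEATURE_GOTOX
    /* emit computed gotos / jump tables */
    #else
    /* fall back to switch-based dispatch */
    #endif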
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index 50d585d..e5066fa 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -108,11 +108,11 @@ public:
cir::LongDoubleType getLongDoubleTy(const llvm::fltSemantics &format) const {
if (&format == &llvm::APFloat::IEEEdouble())
- return cir::LongDoubleType::get(getContext(), typeCache.DoubleTy);
+ return cir::LongDoubleType::get(getContext(), typeCache.doubleTy);
if (&format == &llvm::APFloat::x87DoubleExtended())
- return cir::LongDoubleType::get(getContext(), typeCache.FP80Ty);
+ return cir::LongDoubleType::get(getContext(), typeCache.fP80Ty);
if (&format == &llvm::APFloat::IEEEquad())
- return cir::LongDoubleType::get(getContext(), typeCache.FP128Ty);
+ return cir::LongDoubleType::get(getContext(), typeCache.fP128Ty);
if (&format == &llvm::APFloat::PPCDoubleDouble())
llvm_unreachable("NYI: PPC double-double format for long double");
llvm_unreachable("Unsupported format for long double");
@@ -258,17 +258,17 @@ public:
}
}
- cir::VoidType getVoidTy() { return typeCache.VoidTy; }
+ cir::VoidType getVoidTy() { return typeCache.voidTy; }
- cir::IntType getSInt8Ty() { return typeCache.SInt8Ty; }
- cir::IntType getSInt16Ty() { return typeCache.SInt16Ty; }
- cir::IntType getSInt32Ty() { return typeCache.SInt32Ty; }
- cir::IntType getSInt64Ty() { return typeCache.SInt64Ty; }
+ cir::IntType getSInt8Ty() { return typeCache.sInt8Ty; }
+ cir::IntType getSInt16Ty() { return typeCache.sInt16Ty; }
+ cir::IntType getSInt32Ty() { return typeCache.sInt32Ty; }
+ cir::IntType getSInt64Ty() { return typeCache.sInt64Ty; }
- cir::IntType getUInt8Ty() { return typeCache.UInt8Ty; }
- cir::IntType getUInt16Ty() { return typeCache.UInt16Ty; }
- cir::IntType getUInt32Ty() { return typeCache.UInt32Ty; }
- cir::IntType getUInt64Ty() { return typeCache.UInt64Ty; }
+ cir::IntType getUInt8Ty() { return typeCache.uInt8Ty; }
+ cir::IntType getUInt16Ty() { return typeCache.uInt16Ty; }
+ cir::IntType getUInt32Ty() { return typeCache.uInt32Ty; }
+ cir::IntType getUInt64Ty() { return typeCache.uInt64Ty; }
cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal);
@@ -280,21 +280,21 @@ public:
llvm::APFloat fpVal);
bool isInt8Ty(mlir::Type i) {
- return i == typeCache.UInt8Ty || i == typeCache.SInt8Ty;
+ return i == typeCache.uInt8Ty || i == typeCache.sInt8Ty;
}
bool isInt16Ty(mlir::Type i) {
- return i == typeCache.UInt16Ty || i == typeCache.SInt16Ty;
+ return i == typeCache.uInt16Ty || i == typeCache.sInt16Ty;
}
bool isInt32Ty(mlir::Type i) {
- return i == typeCache.UInt32Ty || i == typeCache.SInt32Ty;
+ return i == typeCache.uInt32Ty || i == typeCache.sInt32Ty;
}
bool isInt64Ty(mlir::Type i) {
- return i == typeCache.UInt64Ty || i == typeCache.SInt64Ty;
+ return i == typeCache.uInt64Ty || i == typeCache.sInt64Ty;
}
bool isInt(mlir::Type i) { return mlir::isa<cir::IntType>(i); }
// Fetch the type representing a pointer to unsigned int8 values.
- cir::PointerType getUInt8PtrTy() { return typeCache.UInt8PtrTy; }
+ cir::PointerType getUInt8PtrTy() { return typeCache.uInt8PtrTy; }
/// Get a CIR anonymous record type.
cir::RecordType getAnonRecordTy(llvm::ArrayRef<mlir::Type> members,
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index 5046e09..a829678 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -362,7 +362,7 @@ static Address applyNonVirtualAndVirtualOffset(
// not bytes. So the pointer must be cast to a byte pointer and back.
mlir::Value ptr = addr.getPointer();
- mlir::Type charPtrType = cgf.cgm.UInt8PtrTy;
+ mlir::Type charPtrType = cgf.cgm.uInt8PtrTy;
mlir::Value charPtr = cgf.getBuilder().createBitcast(ptr, charPtrType);
mlir::Value adjusted = cir::PtrStrideOp::create(
cgf.getBuilder(), loc, charPtrType, charPtr, baseOffset);
@@ -1105,7 +1105,7 @@ mlir::Value CIRGenFunction::getVTTParameter(GlobalDecl gd, bool forVirtualBase,
// We're the complete constructor, so get the VTT by name.
cir::GlobalOp vtt = cgm.getVTables().getAddrOfVTT(rd);
return builder.createVTTAddrPoint(
- loc, builder.getPointerTo(cgm.VoidPtrTy),
+ loc, builder.getPointerTo(cgm.voidPtrTy),
mlir::FlatSymbolRefAttr::get(vtt.getSymNameAttr()), subVTTIndex);
}
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
index 8723a6e..930ae55 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
@@ -55,7 +55,7 @@ cir::CallOp CIRGenFunction::emitCoroIDBuiltinCall(mlir::Location loc,
if (!builtin) {
fnOp = cgm.createCIRBuiltinFunction(
loc, cgm.builtinCoroId,
- cir::FuncType::get({int32Ty, VoidPtrTy, VoidPtrTy, VoidPtrTy}, int32Ty),
+ cir::FuncType::get({int32Ty, voidPtrTy, voidPtrTy, voidPtrTy}, int32Ty),
/*FD=*/nullptr);
assert(fnOp && "should always succeed");
} else {
@@ -75,7 +75,7 @@ cir::CallOp CIRGenFunction::emitCoroAllocBuiltinCall(mlir::Location loc) {
cir::FuncOp fnOp;
if (!builtin) {
fnOp = cgm.createCIRBuiltinFunction(loc, cgm.builtinCoroAlloc,
- cir::FuncType::get({UInt32Ty}, boolTy),
+ cir::FuncType::get({uInt32Ty}, boolTy),
/*fd=*/nullptr);
assert(fnOp && "should always succeed");
} else {
@@ -95,7 +95,7 @@ CIRGenFunction::emitCoroBeginBuiltinCall(mlir::Location loc,
if (!builtin) {
fnOp = cgm.createCIRBuiltinFunction(
loc, cgm.builtinCoroBegin,
- cir::FuncType::get({UInt32Ty, VoidPtrTy}, VoidPtrTy),
+ cir::FuncType::get({uInt32Ty, voidPtrTy}, voidPtrTy),
/*fd=*/nullptr);
assert(fnOp && "should always succeed");
} else {
@@ -110,7 +110,7 @@ CIRGenFunction::emitCoroBeginBuiltinCall(mlir::Location loc,
mlir::LogicalResult
CIRGenFunction::emitCoroutineBody(const CoroutineBodyStmt &s) {
mlir::Location openCurlyLoc = getLoc(s.getBeginLoc());
- cir::ConstantOp nullPtrCst = builder.getNullPtr(VoidPtrTy, openCurlyLoc);
+ cir::ConstantOp nullPtrCst = builder.getNullPtr(voidPtrTy, openCurlyLoc);
auto fn = mlir::cast<cir::FuncOp>(curFn);
fn.setCoroutine(true);
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 5667273..aeea0ef 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -80,13 +80,13 @@ CIRGenFunction::emitAutoVarAlloca(const VarDecl &d,
assert(!cir::MissingFeatures::openMP());
if (!didCallStackSave) {
// Save the stack.
- cir::PointerType defaultTy = AllocaInt8PtrTy;
+ cir::PointerType defaultTy = allocaInt8PtrTy;
CharUnits align = CharUnits::fromQuantity(
cgm.getDataLayout().getAlignment(defaultTy, false));
Address stack = createTempAlloca(defaultTy, align, loc, "saved_stack");
mlir::Value v = builder.createStackSave(loc, defaultTy);
- assert(v.getType() == AllocaInt8PtrTy);
+ assert(v.getType() == allocaInt8PtrTy);
builder.createStore(loc, v, stack);
didCallStackSave = true;
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index df6ee56..5ccb431 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -2529,7 +2529,7 @@ CIRGenFunction::emitConditionalBlocks(const AbstractConditionalOperator *e,
// If both arms are void, so be it.
if (!yieldTy)
- yieldTy = VoidTy;
+ yieldTy = voidTy;
// Insert required yields.
for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index 8fe0d9b4..3d3030c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -490,7 +490,7 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
for (uint64_t i = 0; i != numInitElements; ++i) {
// Advance to the next element.
if (i > 0) {
- one = builder.getConstantInt(loc, cgf.PtrDiffTy, i);
+ one = builder.getConstantInt(loc, cgf.ptrDiffTy, i);
element = builder.createPtrStride(loc, begin, one);
}
@@ -512,7 +512,7 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
cgf.getTypes().isZeroInitializable(elementType))) {
// Advance to the start of the rest of the array.
if (numInitElements) {
- one = builder.getConstantInt(loc, cgf.PtrDiffTy, 1);
+ one = builder.getConstantInt(loc, cgf.ptrDiffTy, 1);
element = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
element, one);
}
@@ -526,7 +526,7 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
// Compute the end of array
cir::ConstantOp numArrayElementsConst = builder.getConstInt(
- loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), numArrayElements);
+ loc, mlir::cast<cir::IntType>(cgf.ptrDiffTy), numArrayElements);
mlir::Value end = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
begin, numArrayElementsConst);
@@ -563,7 +563,7 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
// Advance pointer and store them to temporary variable
cir::ConstantOp one = builder.getConstInt(
- loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), 1);
+ loc, mlir::cast<cir::IntType>(cgf.ptrDiffTy), 1);
auto nextElement = cir::PtrStrideOp::create(
builder, loc, cirElementPtrType, currentElement, one);
cgf.emitStoreThroughLValue(RValue::get(nextElement), tmpLV);
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index 7a35382..9dd9b6d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -257,12 +257,12 @@ static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e,
if (!e->isArray()) {
CharUnits typeSize = cgf.getContext().getTypeSizeInChars(type);
sizeWithoutCookie = cgf.getBuilder().getConstant(
- loc, cir::IntAttr::get(cgf.SizeTy, typeSize.getQuantity()));
+ loc, cir::IntAttr::get(cgf.sizeTy, typeSize.getQuantity()));
return sizeWithoutCookie;
}
// The width of size_t.
- unsigned sizeWidth = cgf.cgm.getDataLayout().getTypeSizeInBits(cgf.SizeTy);
+ unsigned sizeWidth = cgf.cgm.getDataLayout().getTypeSizeInBits(cgf.sizeTy);
// The number of elements can have an arbitrary integer type;
// essentially, we need to multiply it by a constant factor, add a
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
index 928e5aa..6af87a0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp
@@ -46,7 +46,7 @@ namespace {
class ConstExprEmitter;
static mlir::TypedAttr computePadding(CIRGenModule &cgm, CharUnits size) {
- mlir::Type eltTy = cgm.UCharTy;
+ mlir::Type eltTy = cgm.uCharTy;
clang::CharUnits::QuantityType arSize = size.getQuantity();
CIRGenBuilderTy &bld = cgm.getBuilder();
if (size > CharUnits::One()) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index db6878d..119314f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -762,9 +762,9 @@ public:
// FIXME(cir): For now lets pretend we shouldn't use the conversion
// intrinsics and insert a cast here unconditionally.
src = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, src,
- cgf.FloatTy);
+ cgf.floatTy);
srcType = cgf.getContext().FloatTy;
- mlirSrcType = cgf.FloatTy;
+ mlirSrcType = cgf.floatTy;
}
}
@@ -1738,7 +1738,7 @@ mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) {
//
// See more in `EmitSub` in CGExprScalar.cpp.
assert(!cir::MissingFeatures::llvmLoweringPtrDiffConsidersPointee());
- return cir::PtrDiffOp::create(builder, cgf.getLoc(ops.loc), cgf.PtrDiffTy,
+ return cir::PtrDiffOp::create(builder, cgf.getLoc(ops.loc), cgf.ptrDiffTy,
ops.lhs, ops.rhs);
}
@@ -2220,7 +2220,7 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
"sizeof operator for VariableArrayType",
e->getStmtClassName());
return builder.getConstant(
- loc, cir::IntAttr::get(cgf.cgm.UInt64Ty,
+ loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
llvm::APSInt(llvm::APInt(64, 1), true)));
}
} else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) {
@@ -2228,12 +2228,12 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
e->getSourceRange(), "sizeof operator for OpenMpRequiredSimdAlign",
e->getStmtClassName());
return builder.getConstant(
- loc, cir::IntAttr::get(cgf.cgm.UInt64Ty,
+ loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
llvm::APSInt(llvm::APInt(64, 1), true)));
}
return builder.getConstant(
- loc, cir::IntAttr::get(cgf.cgm.UInt64Ty,
+ loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
e->EvaluateKnownConstInt(cgf.getContext())));
}
@@ -2329,14 +2329,14 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator(
mlir::Value lhs = Visit(lhsExpr);
if (!lhs) {
- lhs = builder.getNullValue(cgf.VoidTy, loc);
+ lhs = builder.getNullValue(cgf.voidTy, loc);
lhsIsVoid = true;
}
mlir::Value rhs = Visit(rhsExpr);
if (lhsIsVoid) {
assert(!rhs && "lhs and rhs types must match");
- rhs = builder.getNullValue(cgf.VoidTy, loc);
+ rhs = builder.getNullValue(cgf.voidTy, loc);
}
return builder.createSelect(loc, condV, lhs, rhs);
@@ -2381,7 +2381,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator(
if (!insertPoints.empty()) {
// If both arms are void, so be it.
if (!yieldTy)
- yieldTy = cgf.VoidTy;
+ yieldTy = cgf.voidTy;
// Insert required yields.
for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 58feb36..71ff20a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -1008,7 +1008,7 @@ CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType,
if (isa<VariableArrayType>(arrayType)) {
assert(cir::MissingFeatures::vlas());
cgm.errorNYI(*currSrcLoc, "VLAs");
- return builder.getConstInt(*currSrcLoc, SizeTy, 0);
+ return builder.getConstInt(*currSrcLoc, sizeTy, 0);
}
uint64_t countFromCLAs = 1;
@@ -1037,7 +1037,7 @@ CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType,
}
baseType = eltType;
- return builder.getConstInt(*currSrcLoc, SizeTy, countFromCLAs);
+ return builder.getConstInt(*currSrcLoc, sizeTy, countFromCLAs);
}
mlir::Value CIRGenFunction::emitAlignmentAssumption(
@@ -1074,7 +1074,7 @@ CIRGenFunction::getVLASize(const VariableArrayType *type) {
elementType = type->getElementType();
mlir::Value vlaSize = vlaSizeMap[type->getSizeExpr()];
assert(vlaSize && "no size for VLA!");
- assert(vlaSize.getType() == SizeTy);
+ assert(vlaSize.getType() == sizeTy);
if (!numElements) {
numElements = vlaSize;
@@ -1188,7 +1188,7 @@ void CIRGenFunction::emitVariablyModifiedType(QualType type) {
// Always zexting here would be wrong if it weren't
// undefined behavior to have a negative bound.
// FIXME: What about when size's type is larger than size_t?
- entry = builder.createIntCast(size, SizeTy);
+ entry = builder.createIntCast(size, sizeTy);
}
}
type = vat->getElementType();
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index 88fedf1..f603f5ec 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -1846,13 +1846,13 @@ mlir::Value CIRGenItaniumCXXABI::getVirtualBaseClassOffset(
const CXXRecordDecl *classDecl, const CXXRecordDecl *baseClassDecl) {
CIRGenBuilderTy &builder = cgf.getBuilder();
mlir::Value vtablePtr = cgf.getVTablePtr(loc, thisAddr, classDecl);
- mlir::Value vtableBytePtr = builder.createBitcast(vtablePtr, cgm.UInt8PtrTy);
+ mlir::Value vtableBytePtr = builder.createBitcast(vtablePtr, cgm.uInt8PtrTy);
CharUnits vbaseOffsetOffset =
cgm.getItaniumVTableContext().getVirtualBaseOffsetOffset(classDecl,
baseClassDecl);
mlir::Value offsetVal =
builder.getSInt64(vbaseOffsetOffset.getQuantity(), loc);
- auto vbaseOffsetPtr = cir::PtrStrideOp::create(builder, loc, cgm.UInt8PtrTy,
+ auto vbaseOffsetPtr = cir::PtrStrideOp::create(builder, loc, cgm.uInt8PtrTy,
vtableBytePtr, offsetVal);
mlir::Value vbaseOffset;
@@ -1861,9 +1861,9 @@ mlir::Value CIRGenItaniumCXXABI::getVirtualBaseClassOffset(
cgm.errorNYI(loc, "getVirtualBaseClassOffset: relative layout");
} else {
mlir::Value offsetPtr = builder.createBitcast(
- vbaseOffsetPtr, builder.getPointerTo(cgm.PtrDiffTy));
+ vbaseOffsetPtr, builder.getPointerTo(cgm.ptrDiffTy));
vbaseOffset = builder.createLoad(
- loc, Address(offsetPtr, cgm.PtrDiffTy, cgf.getPointerAlign()));
+ loc, Address(offsetPtr, cgm.ptrDiffTy, cgf.getPointerAlign()));
}
return vbaseOffset;
}
@@ -2244,7 +2244,7 @@ Address CIRGenItaniumCXXABI::initializeArrayCookie(CIRGenFunction &cgf,
// Write the number of elements into the appropriate slot.
Address numElementsPtr =
- cookiePtr.withElementType(cgf.getBuilder(), cgf.SizeTy);
+ cookiePtr.withElementType(cgf.getBuilder(), cgf.sizeTy);
cgf.getBuilder().createStore(loc, numElements, numElementsPtr);
// Finally, compute a pointer to the actual data buffer by skipping
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 46adfe2..9f9b2db 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -67,28 +67,28 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
abi(createCXXABI(*this)), genTypes(*this), vtables(*this) {
// Initialize cached types
- VoidTy = cir::VoidType::get(&getMLIRContext());
- VoidPtrTy = cir::PointerType::get(VoidTy);
- SInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/true);
- SInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/true);
- SInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/true);
- SInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/true);
- SInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/true);
- UInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/false);
- UInt8PtrTy = cir::PointerType::get(UInt8Ty);
+ voidTy = cir::VoidType::get(&getMLIRContext());
+ voidPtrTy = cir::PointerType::get(voidTy);
+ sInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/true);
+ sInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/true);
+ sInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/true);
+ sInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/true);
+ sInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/true);
+ uInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/false);
+ uInt8PtrTy = cir::PointerType::get(uInt8Ty);
cirAllocaAddressSpace = getTargetCIRGenInfo().getCIRAllocaAddressSpace();
- UInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/false);
- UInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/false);
- UInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/false);
- UInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/false);
- FP16Ty = cir::FP16Type::get(&getMLIRContext());
- BFloat16Ty = cir::BF16Type::get(&getMLIRContext());
- FloatTy = cir::SingleType::get(&getMLIRContext());
- DoubleTy = cir::DoubleType::get(&getMLIRContext());
- FP80Ty = cir::FP80Type::get(&getMLIRContext());
- FP128Ty = cir::FP128Type::get(&getMLIRContext());
-
- AllocaInt8PtrTy = cir::PointerType::get(UInt8Ty, cirAllocaAddressSpace);
+ uInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/false);
+ uInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/false);
+ uInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/false);
+ uInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/false);
+ fP16Ty = cir::FP16Type::get(&getMLIRContext());
+ bFloat16Ty = cir::BF16Type::get(&getMLIRContext());
+ floatTy = cir::SingleType::get(&getMLIRContext());
+ doubleTy = cir::DoubleType::get(&getMLIRContext());
+ fP80Ty = cir::FP80Type::get(&getMLIRContext());
+ fP128Ty = cir::FP128Type::get(&getMLIRContext());
+
+ allocaInt8PtrTy = cir::PointerType::get(uInt8Ty, cirAllocaAddressSpace);
PointerAlignInBytes =
astContext
@@ -97,16 +97,16 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
.getQuantity();
const unsigned charSize = astContext.getTargetInfo().getCharWidth();
- UCharTy = cir::IntType::get(&getMLIRContext(), charSize, /*isSigned=*/false);
+ uCharTy = cir::IntType::get(&getMLIRContext(), charSize, /*isSigned=*/false);
// TODO(CIR): Should be updated once TypeSizeInfoAttr is upstreamed
const unsigned sizeTypeSize =
astContext.getTypeSize(astContext.getSignedSizeType());
SizeSizeInBytes = astContext.toCharUnitsFromBits(sizeTypeSize).getQuantity();
// In CIRGenTypeCache, uIntPtrTy and sizeTy are fields of the same union
- UIntPtrTy =
+ uIntPtrTy =
cir::IntType::get(&getMLIRContext(), sizeTypeSize, /*isSigned=*/false);
- PtrDiffTy =
+ ptrDiffTy =
cir::IntType::get(&getMLIRContext(), sizeTypeSize, /*isSigned=*/true);
std::optional<cir::SourceLanguage> sourceLanguage = getCIRSourceLanguage();
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
index be063033..890f8a6 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
@@ -617,11 +617,11 @@ void OpenACCRecipeBuilderBase::createReductionRecipeCombiner(
if (const auto *cat = cgf.getContext().getAsConstantArrayType(origType)) {
// If we're in an array, we have to emit the combiner for each element of
// the array.
- auto itrTy = mlir::cast<cir::IntType>(cgf.PtrDiffTy);
+ auto itrTy = mlir::cast<cir::IntType>(cgf.ptrDiffTy);
auto itrPtrTy = cir::PointerType::get(itrTy);
mlir::Value zero =
- builder.getConstInt(loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), 0);
+ builder.getConstInt(loc, mlir::cast<cir::IntType>(cgf.ptrDiffTy), 0);
mlir::Value itr =
cir::AllocaOp::create(builder, loc, itrPtrTy, itrTy, "itr",
cgf.cgm.getSize(cgf.getPointerAlign()));
@@ -633,7 +633,7 @@ void OpenACCRecipeBuilderBase::createReductionRecipeCombiner(
[&](mlir::OpBuilder &b, mlir::Location loc) {
auto loadItr = cir::LoadOp::create(builder, loc, {itr});
mlir::Value arraySize = builder.getConstInt(
- loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), cat->getZExtSize());
+ loc, mlir::cast<cir::IntType>(cgf.ptrDiffTy), cat->getZExtSize());
auto cmp = builder.createCompare(loc, cir::CmpOpKind::lt, loadItr,
arraySize);
builder.createCondition(cmp);
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
index ff5842c..0f63e91 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
+++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
@@ -26,47 +26,47 @@ struct CIRGenTypeCache {
CIRGenTypeCache() {}
// ClangIR void type
- cir::VoidType VoidTy;
+ cir::VoidType voidTy;
// ClangIR signed integral types of common sizes
- cir::IntType SInt8Ty;
- cir::IntType SInt16Ty;
- cir::IntType SInt32Ty;
- cir::IntType SInt64Ty;
- cir::IntType SInt128Ty;
+ cir::IntType sInt8Ty;
+ cir::IntType sInt16Ty;
+ cir::IntType sInt32Ty;
+ cir::IntType sInt64Ty;
+ cir::IntType sInt128Ty;
// ClangIR unsigned integral type of common sizes
- cir::IntType UInt8Ty;
- cir::IntType UInt16Ty;
- cir::IntType UInt32Ty;
- cir::IntType UInt64Ty;
- cir::IntType UInt128Ty;
+ cir::IntType uInt8Ty;
+ cir::IntType uInt16Ty;
+ cir::IntType uInt32Ty;
+ cir::IntType uInt64Ty;
+ cir::IntType uInt128Ty;
// ClangIR floating-point types with fixed formats
- cir::FP16Type FP16Ty;
- cir::BF16Type BFloat16Ty;
- cir::SingleType FloatTy;
- cir::DoubleType DoubleTy;
- cir::FP80Type FP80Ty;
- cir::FP128Type FP128Ty;
+ cir::FP16Type fP16Ty;
+ cir::BF16Type bFloat16Ty;
+ cir::SingleType floatTy;
+ cir::DoubleType doubleTy;
+ cir::FP80Type fP80Ty;
+ cir::FP128Type fP128Ty;
/// ClangIR char
- mlir::Type UCharTy;
+ mlir::Type uCharTy;
/// intptr_t, size_t, and ptrdiff_t, which we assume are the same size.
union {
- mlir::Type UIntPtrTy;
- mlir::Type SizeTy;
+ mlir::Type uIntPtrTy;
+ mlir::Type sizeTy;
};
- mlir::Type PtrDiffTy;
+ mlir::Type ptrDiffTy;
/// void* in address space 0
- cir::PointerType VoidPtrTy;
- cir::PointerType UInt8PtrTy;
+ cir::PointerType voidPtrTy;
+ cir::PointerType uInt8PtrTy;
/// void* in alloca address space
- cir::PointerType AllocaInt8PtrTy;
+ cir::PointerType allocaInt8PtrTy;
/// The size and alignment of a pointer into the generic address space.
union {
diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
index d1b91d0..03618d4 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
@@ -71,7 +71,7 @@ mlir::Type CIRGenTypes::convertFunctionTypeInternal(QualType qft) {
if (!isFuncTypeConvertible(ft)) {
cgm.errorNYI(SourceLocation(), "function type involving an incomplete type",
qft);
- return cir::FuncType::get(SmallVector<mlir::Type, 1>{}, cgm.VoidTy);
+ return cir::FuncType::get(SmallVector<mlir::Type, 1>{}, cgm.voidTy);
}
const CIRGenFunctionInfo *fi;
@@ -298,7 +298,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
switch (cast<BuiltinType>(ty)->getKind()) {
// void
case BuiltinType::Void:
- resultType = cgm.VoidTy;
+ resultType = cgm.voidTy;
break;
// bool
@@ -338,42 +338,42 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
// Floating-point types
case BuiltinType::Float16:
- resultType = cgm.FP16Ty;
+ resultType = cgm.fP16Ty;
break;
case BuiltinType::Half:
if (astContext.getLangOpts().NativeHalfType ||
!astContext.getTargetInfo().useFP16ConversionIntrinsics()) {
- resultType = cgm.FP16Ty;
+ resultType = cgm.fP16Ty;
} else {
cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
- resultType = cgm.SInt32Ty;
+ resultType = cgm.sInt32Ty;
}
break;
case BuiltinType::BFloat16:
- resultType = cgm.BFloat16Ty;
+ resultType = cgm.bFloat16Ty;
break;
case BuiltinType::Float:
assert(&astContext.getFloatTypeSemantics(type) ==
&llvm::APFloat::IEEEsingle() &&
"ClangIR NYI: 'float' in a format other than IEEE 32-bit");
- resultType = cgm.FloatTy;
+ resultType = cgm.floatTy;
break;
case BuiltinType::Double:
assert(&astContext.getFloatTypeSemantics(type) ==
&llvm::APFloat::IEEEdouble() &&
"ClangIR NYI: 'double' in a format other than IEEE 64-bit");
- resultType = cgm.DoubleTy;
+ resultType = cgm.doubleTy;
break;
case BuiltinType::LongDouble:
resultType =
builder.getLongDoubleTy(astContext.getFloatTypeSemantics(type));
break;
case BuiltinType::Float128:
- resultType = cgm.FP128Ty;
+ resultType = cgm.fP128Ty;
break;
case BuiltinType::Ibm128:
cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
- resultType = cgm.SInt32Ty;
+ resultType = cgm.sInt32Ty;
break;
case BuiltinType::NullPtr:
@@ -386,7 +386,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
default:
cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
- resultType = cgm.SInt32Ty;
+ resultType = cgm.sInt32Ty;
break;
}
break;
@@ -439,7 +439,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
// int X[] -> [0 x int], unless the element type is not sized. If it is
// unsized (e.g. an incomplete record) just use [0 x i8].
if (!cir::isSized(elemTy)) {
- elemTy = cgm.SInt8Ty;
+ elemTy = cgm.sInt8Ty;
}
resultType = cir::ArrayType::get(elemTy, 0);
@@ -454,7 +454,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
// i8 just to have a concrete type"
if (!cir::isSized(elemTy)) {
cgm.errorNYI(SourceLocation(), "arrays of undefined struct type", type);
- resultType = cgm.UInt32Ty;
+ resultType = cgm.uInt32Ty;
break;
}
@@ -477,7 +477,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
// Return a placeholder 'i32' type. This can be changed later when the
// type is defined (see UpdateCompletedType), but is likely to be the
// "right" answer.
- resultType = cgm.UInt32Ty;
+ resultType = cgm.uInt32Ty;
break;
}
@@ -490,7 +490,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
const auto *bitIntTy = cast<BitIntType>(type);
if (bitIntTy->getNumBits() > cir::IntType::maxBitwidth()) {
cgm.errorNYI(SourceLocation(), "large _BitInt type", type);
- resultType = cgm.SInt32Ty;
+ resultType = cgm.sInt32Ty;
} else {
resultType = cir::IntType::get(&getMLIRContext(), bitIntTy->getNumBits(),
bitIntTy->isSigned());
@@ -515,7 +515,7 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
default:
cgm.errorNYI(SourceLocation(), "processing of type",
type->getTypeClassName());
- resultType = cgm.SInt32Ty;
+ resultType = cgm.sInt32Ty;
break;
}
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index 2d2ef42..7ba03ce 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -286,14 +286,14 @@ void cir::ConditionOp::getSuccessorRegions(
// Parent is a loop: condition may branch to the body or to the parent op.
if (auto loopOp = dyn_cast<LoopOpInterface>(getOperation()->getParentOp())) {
regions.emplace_back(&loopOp.getBody(), loopOp.getBody().getArguments());
- regions.emplace_back(loopOp->getResults());
+ regions.emplace_back(getOperation(), loopOp->getResults());
}
assert(!cir::MissingFeatures::awaitOp());
}
MutableOperandRange
-cir::ConditionOp::getMutableSuccessorOperands(RegionBranchPoint point) {
+cir::ConditionOp::getMutableSuccessorOperands(RegionSuccessor point) {
// No values are yielded to the successor region.
return MutableOperandRange(getOperation(), 0, 0);
}
@@ -989,7 +989,8 @@ void cir::IfOp::getSuccessorRegions(mlir::RegionBranchPoint point,
SmallVectorImpl<RegionSuccessor> &regions) {
// The `then` and the `else` region branch back to the parent operation.
if (!point.isParent()) {
- regions.push_back(RegionSuccessor());
+ regions.push_back(
+ RegionSuccessor(getOperation(), getOperation()->getResults()));
return;
}
@@ -1039,7 +1040,7 @@ void cir::ScopeOp::getSuccessorRegions(
mlir::RegionBranchPoint point, SmallVectorImpl<RegionSuccessor> &regions) {
// The only region always branch back to the parent operation.
if (!point.isParent()) {
- regions.push_back(RegionSuccessor(getODSResults(0)));
+ regions.push_back(RegionSuccessor(getOperation(), getODSResults(0)));
return;
}
@@ -1124,7 +1125,8 @@ Block *cir::BrCondOp::getSuccessorForOperands(ArrayRef<Attribute> operands) {
void cir::CaseOp::getSuccessorRegions(
mlir::RegionBranchPoint point, SmallVectorImpl<RegionSuccessor> &regions) {
if (!point.isParent()) {
- regions.push_back(RegionSuccessor());
+ regions.push_back(
+ RegionSuccessor(getOperation(), getOperation()->getResults()));
return;
}
regions.push_back(RegionSuccessor(&getCaseRegion()));
@@ -1188,7 +1190,8 @@ static void printSwitchOp(OpAsmPrinter &p, cir::SwitchOp op,
void cir::SwitchOp::getSuccessorRegions(
mlir::RegionBranchPoint point, SmallVectorImpl<RegionSuccessor> &region) {
if (!point.isParent()) {
- region.push_back(RegionSuccessor());
+ region.push_back(
+ RegionSuccessor(getOperation(), getOperation()->getResults()));
return;
}
@@ -1402,7 +1405,8 @@ void cir::GlobalOp::getSuccessorRegions(
mlir::RegionBranchPoint point, SmallVectorImpl<RegionSuccessor> &regions) {
// The `ctor` and `dtor` regions always branch back to the parent operation.
if (!point.isParent()) {
- regions.push_back(RegionSuccessor());
+ regions.push_back(
+ RegionSuccessor(getOperation(), getOperation()->getResults()));
return;
}
@@ -1961,7 +1965,7 @@ void cir::TernaryOp::getSuccessorRegions(
mlir::RegionBranchPoint point, SmallVectorImpl<RegionSuccessor> &regions) {
// The `true` and the `false` region branch back to the parent operation.
if (!point.isParent()) {
- regions.push_back(RegionSuccessor(this->getODSResults(0)));
+ regions.push_back(RegionSuccessor(getOperation(), this->getODSResults(0)));
return;
}
@@ -2978,7 +2982,8 @@ void cir::TryOp::getSuccessorRegions(
llvm::SmallVectorImpl<mlir::RegionSuccessor> &regions) {
// The `try` and the `catchers` region branch back to the parent operation.
if (!point.isParent()) {
- regions.push_back(mlir::RegionSuccessor());
+ regions.push_back(
+ RegionSuccessor(getOperation(), getOperation()->getResults()));
return;
}
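All of these hunks follow the same migration pattern: a successor that rejoins the parent operation must now name that operation and its results instead of being default-constructed. A minimal sketch under that assumption (MyOp and getBodyRegion are placeholder names):

    void MyOp::getSuccessorRegions(mlir::RegionBranchPoint point,
                                   SmallVectorImpl<RegionSuccessor> &regions) {
      if (!point.isParent()) {
        // Region terminators branch back to the parent op's results.
        regions.push_back(
            RegionSuccessor(getOperation(), getOperation()->getResults()));
        return;
      }
      // The parent branches into its (only) region.
      regions.push_back(RegionSuccessor(&getBodyRegion()));
    }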
diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
index 21c96fe..ca7554e 100644
--- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
@@ -606,10 +606,12 @@ public:
// `cir.try_call`.
llvm::SmallVector<cir::CallOp, 4> callsToRewrite;
tryOp.getTryRegion().walk([&](CallOp op) {
+ if (op.getNothrow())
+ return;
+
// Only grab calls within immediate closest TryOp scope.
if (op->getParentOfType<cir::TryOp>() != tryOp)
return;
- assert(!cir::MissingFeatures::opCallExceptionAttr());
callsToRewrite.push_back(op);
});
diff --git a/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp b/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp
index 0ce5017..6de51f12 100644
--- a/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp
+++ b/clang/lib/CIR/Interfaces/CIRLoopOpInterface.cpp
@@ -17,7 +17,7 @@ namespace cir {
void LoopOpInterface::getLoopOpSuccessorRegions(
LoopOpInterface op, mlir::RegionBranchPoint point,
llvm::SmallVectorImpl<mlir::RegionSuccessor> &regions) {
- assert(point.isParent() || point.getRegionOrNull());
+ assert(point.isParent() || point.getTerminatorPredecessorOrNull());
// Branching to first region: go to condition or body (do-while).
if (point.isParent()) {
@@ -25,15 +25,18 @@ void LoopOpInterface::getLoopOpSuccessorRegions(
return;
}
+ mlir::Region *parentRegion =
+ point.getTerminatorPredecessorOrNull()->getParentRegion();
+
// Branching from condition: go to body or exit.
- if (&op.getCond() == point.getRegionOrNull()) {
- regions.emplace_back(mlir::RegionSuccessor(op->getResults()));
+ if (&op.getCond() == parentRegion) {
+ regions.emplace_back(mlir::RegionSuccessor(op, op->getResults()));
regions.emplace_back(&op.getBody(), op.getBody().getArguments());
return;
}
// Branching from body: go to step (for) or condition.
- if (&op.getBody() == point.getRegionOrNull()) {
+ if (&op.getBody() == parentRegion) {
// FIXME(cir): Should we consider break/continue statements here?
mlir::Region *afterBody =
(op.maybeGetStep() ? op.maybeGetStep() : &op.getCond());
@@ -42,7 +45,7 @@ void LoopOpInterface::getLoopOpSuccessorRegions(
}
// Branching from step: go to condition.
- if (op.maybeGetStep() == point.getRegionOrNull()) {
+ if (op.maybeGetStep() == parentRegion) {
regions.emplace_back(&op.getCond(), op.getCond().getArguments());
return;
}
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index aefc262..3c31314 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -800,16 +800,6 @@ static void addSanitizers(const Triple &TargetTriple,
MPM.addPass(DataFlowSanitizerPass(LangOpts.NoSanitizeFiles,
PB.getVirtualFileSystemPtr()));
}
-
- if (LangOpts.Sanitize.has(SanitizerKind::AllocToken)) {
- if (Level == OptimizationLevel::O0) {
- // The default pass builder only infers libcall function attrs when
- // optimizing, so we insert it here because we need it for accurate
- // memory allocation function detection.
- MPM.addPass(InferFunctionAttrsPass());
- }
- MPM.addPass(AllocTokenPass(getAllocTokenOptions(LangOpts, CodeGenOpts)));
- }
};
if (ClSanitizeOnOptimizerEarlyEP) {
PB.registerOptimizerEarlyEPCallback(
@@ -852,6 +842,23 @@ static void addSanitizers(const Triple &TargetTriple,
}
}
+static void addAllocTokenPass(const Triple &TargetTriple,
+ const CodeGenOptions &CodeGenOpts,
+ const LangOptions &LangOpts, PassBuilder &PB) {
+ PB.registerOptimizerLastEPCallback([&](ModulePassManager &MPM,
+ OptimizationLevel Level,
+ ThinOrFullLTOPhase) {
+ if (Level == OptimizationLevel::O0 &&
+ LangOpts.Sanitize.has(SanitizerKind::AllocToken)) {
+ // The default pass builder only infers libcall function attrs when
+ // optimizing, so we insert it here because we need it for accurate
+ // memory allocation function detection with -fsanitize=alloc-token.
+ MPM.addPass(InferFunctionAttrsPass());
+ }
+ MPM.addPass(AllocTokenPass(getAllocTokenOptions(LangOpts, CodeGenOpts)));
+ });
+}
+
void EmitAssemblyHelper::RunOptimizationPipeline(
BackendAction Action, std::unique_ptr<raw_pwrite_stream> &OS,
std::unique_ptr<llvm::ToolOutputFile> &ThinLinkOS, BackendConsumer *BC) {
@@ -1106,6 +1113,7 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
if (!IsThinLTOPostLink) {
addSanitizers(TargetTriple, CodeGenOpts, LangOpts, PB);
addKCFIPass(TargetTriple, LangOpts, PB);
+ addAllocTokenPass(TargetTriple, CodeGenOpts, LangOpts, PB);
}
if (std::optional<GCOVOptions> Options =
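Note the scheduling change: AllocTokenPass is now registered unconditionally at the optimizer-last extension point (presumably so it can also handle the token intrinsic emitted for __builtin_infer_alloc_token below), while InferFunctionAttrsPass is only pre-added at -O0 under -fsanitize=alloc-token; for an invocation like `clang -O0 -fsanitize=alloc-token -c a.c` the attrs pass runs first, whereas at -O2 the default pipeline has already inferred those attributes.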
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index fd14cd6..b81e0d0 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -4506,6 +4506,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(AI);
}
+ case Builtin::BI__builtin_infer_alloc_token: {
+ llvm::MDNode *MDN = buildAllocToken(E);
+ llvm::Value *MDV = MetadataAsValue::get(getLLVMContext(), MDN);
+ llvm::Function *F =
+ CGM.getIntrinsic(llvm::Intrinsic::alloc_token_id, {IntPtrTy});
+ llvm::CallBase *TokenID = Builder.CreateCall(F, MDV);
+ return RValue::get(TokenID);
+ }
+
case Builtin::BIbzero:
case Builtin::BI__builtin_bzero: {
Address Dest = EmitPointerWithAlignment(E->getArg(0));
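A sketch of intended usage (the token-taking allocator here is hypothetical; only the builtin comes from this patch): the builtin's argument mirrors the allocation call, and it evaluates to an intptr-sized token an allocator can use to partition the heap:

    void *my_malloc_with_token(size_t size, size_t token); // hypothetical

    struct Node { struct Node *next; int key; };
    void *p = my_malloc_with_token(
        sizeof(struct Node), __builtin_infer_alloc_token(sizeof(struct Node)));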
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index 6af8066..07a2cfb 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -1174,14 +1174,13 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
}
llvm::DIType *CGDebugInfo::CreateType(const BitIntType *Ty) {
-
StringRef Name = Ty->isUnsigned() ? "unsigned _BitInt" : "_BitInt";
llvm::dwarf::TypeKind Encoding = Ty->isUnsigned()
? llvm::dwarf::DW_ATE_unsigned
: llvm::dwarf::DW_ATE_signed;
-
return DBuilder.createBasicType(Name, CGM.getContext().getTypeSize(Ty),
- Encoding);
+ Encoding, llvm::DINode::FlagZero, 0,
+ Ty->getNumBits());
}
llvm::DIType *CGDebugInfo::CreateType(const ComplexType *Ty) {
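With the extra arguments, the basic type entry for a _BitInt can carry its exact bit width separately from its storage size; illustratively:

    _BitInt(24) x = 7; // occupies 32 storage bits on typical targets, but the
                       // debug type now also records the 24-bit value width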
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 301d577..01f2161 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -2297,9 +2297,13 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
CGM.getABIInfo().getOptimalVectorMemoryType(VecTy, getLangOpts());
if (!ClangVecTy->isPackedVectorBoolType(getContext()) &&
VecTy != NewVecTy) {
- SmallVector<int, 16> Mask(NewVecTy->getNumElements(), -1);
+ SmallVector<int, 16> Mask(NewVecTy->getNumElements(),
+ VecTy->getNumElements());
std::iota(Mask.begin(), Mask.begin() + VecTy->getNumElements(), 0);
- Value = Builder.CreateShuffleVector(Value, Mask, "extractVec");
+ // Use undef instead of poison for the padding lanes, to make sure no
+ // padding bits are poisoned, which may break coercion.
+ Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy),
+ Mask, "extractVec");
SrcTy = NewVecTy;
}
if (Addr.getElementType() != SrcTy)
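A worked instance of the mask above, assuming VecTy = <3 x i32> and NewVecTy = <4 x i32> (so Mask = {0, 1, 2, 3}):

    // Lanes 0-2 copy Value; lane 3's index equals the source lane count, so
    // it selects lane 0 of the second shuffle operand. Passing undef there
    // (instead of the implicit poison of a -1 mask index) keeps the padding
    // lane merely unspecified, so the padded bits cannot break coercion.
    llvm::Value *Widened = Builder.CreateShuffleVector(
        Value, llvm::UndefValue::get(VecTy), {0, 1, 2, 3}, "extractVec");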
diff --git a/clang/lib/CodeGen/CGHLSLBuiltins.cpp b/clang/lib/CodeGen/CGHLSLBuiltins.cpp
index 384bd59..fbf4a57 100644
--- a/clang/lib/CodeGen/CGHLSLBuiltins.cpp
+++ b/clang/lib/CodeGen/CGHLSLBuiltins.cpp
@@ -206,7 +206,7 @@ static Intrinsic::ID getWaveActiveSumIntrinsic(llvm::Triple::ArchType Arch,
}
}
-// Return wave active sum that corresponds to the QT scalar type
+// Return wave active max that corresponds to the QT scalar type
static Intrinsic::ID getWaveActiveMaxIntrinsic(llvm::Triple::ArchType Arch,
CGHLSLRuntime &RT, QualType QT) {
switch (Arch) {
@@ -225,6 +225,25 @@ static Intrinsic::ID getWaveActiveMaxIntrinsic(llvm::Triple::ArchType Arch,
}
}
+// Return wave active min that corresponds to the QT scalar type
+static Intrinsic::ID getWaveActiveMinIntrinsic(llvm::Triple::ArchType Arch,
+ CGHLSLRuntime &RT, QualType QT) {
+ switch (Arch) {
+ case llvm::Triple::spirv:
+ if (QT->isUnsignedIntegerType())
+ return Intrinsic::spv_wave_reduce_umin;
+ return Intrinsic::spv_wave_reduce_min;
+ case llvm::Triple::dxil: {
+ if (QT->isUnsignedIntegerType())
+ return Intrinsic::dx_wave_reduce_umin;
+ return Intrinsic::dx_wave_reduce_min;
+ }
+ default:
+ llvm_unreachable("Intrinsic WaveActiveMin"
+ " not supported by target architecture");
+ }
+}
+
// Returns the mangled name for a builtin function that the SPIR-V backend
// will expand into a spec Constant.
static std::string getSpecConstantFunctionName(clang::QualType SpecConstantType,
@@ -742,6 +761,17 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
&CGM.getModule(), IID, {OpExpr->getType()}),
ArrayRef{OpExpr}, "hlsl.wave.active.max");
}
+ case Builtin::BI__builtin_hlsl_wave_active_min: {
+ // Due to the use of variadic arguments, explicitly retrieve the argument
+ Value *OpExpr = EmitScalarExpr(E->getArg(0));
+ Intrinsic::ID IID = getWaveActiveMinIntrinsic(
+ getTarget().getTriple().getArch(), CGM.getHLSLRuntime(),
+ E->getArg(0)->getType());
+
+ return EmitRuntimeCall(Intrinsic::getOrInsertDeclaration(
+ &CGM.getModule(), IID, {OpExpr->getType()}),
+ ArrayRef{OpExpr}, "hlsl.wave.active.min");
+ }
case Builtin::BI__builtin_hlsl_wave_get_lane_index: {
// We don't define a SPIR-V intrinsic, instead it is a SPIR-V built-in
// defined in SPIRVBuiltins.td. So instead we manually get the matching name
diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp
index 40ea513..71c5280 100644
--- a/clang/lib/Driver/Driver.cpp
+++ b/clang/lib/Driver/Driver.cpp
@@ -308,9 +308,18 @@ InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings,
auto ArgString = A->getAsString(Args);
std::string Nearest;
if (getOpts().findNearest(ArgString, Nearest, VisibilityMask) > 1) {
- if (!IsCLMode() &&
- getOpts().findExact(ArgString, Nearest,
- llvm::opt::Visibility(options::CC1Option))) {
+ if (IsFlangMode()) {
+ if (getOpts().findExact(ArgString, Nearest,
+ llvm::opt::Visibility(options::FC1Option))) {
+ DiagID = diag::err_drv_unknown_argument_with_suggestion;
+ Diags.Report(DiagID) << ArgString << "-Xflang " + Nearest;
+ } else {
+ DiagID = diag::err_drv_unknown_argument;
+ Diags.Report(DiagID) << ArgString;
+ }
+ } else if (!IsCLMode() && getOpts().findExact(ArgString, Nearest,
+ llvm::opt::Visibility(
+ options::CC1Option))) {
DiagID = diag::err_drv_unknown_argument_with_suggestion;
Diags.Report(DiagID) << ArgString << "-Xclang " + Nearest;
} else {
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index 79edc56..4e8f63e 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -1414,17 +1414,18 @@ static void CollectARMPACBTIOptions(const ToolChain &TC, const ArgList &Args,
GuardedControlStack = PBP.GuardedControlStack;
}
- bool HasPtrauthReturns = llvm::any_of(CmdArgs, [](const char *Arg) {
- return StringRef(Arg) == "-fptrauth-returns";
- });
+ Arg *PtrauthReturnsArg = Args.getLastArg(options::OPT_fptrauth_returns,
+ options::OPT_fno_ptrauth_returns);
+ bool HasPtrauthReturns =
+ PtrauthReturnsArg &&
+ PtrauthReturnsArg->getOption().matches(options::OPT_fptrauth_returns);
// GCS is currently untested with ptrauth-returns, but enabling this could be
// allowed in future after testing with a suitable system.
- if (HasPtrauthReturns &&
- (Scope != "none" || BranchProtectionPAuthLR || GuardedControlStack)) {
+ if (Scope != "none" || BranchProtectionPAuthLR || GuardedControlStack) {
if (Triple.getEnvironment() == llvm::Triple::PAuthTest)
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << Triple.getTriple();
- else
+ else if (HasPtrauthReturns)
D.Diag(diag::err_drv_incompatible_options)
<< A->getAsString(Args) << "-fptrauth-returns";
}
@@ -1670,34 +1671,42 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
AddUnalignedAccessWarning(CmdArgs);
- Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_intrinsics,
- options::OPT_fno_ptrauth_intrinsics);
- Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_calls,
- options::OPT_fno_ptrauth_calls);
- Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_returns,
- options::OPT_fno_ptrauth_returns);
- Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_auth_traps,
- options::OPT_fno_ptrauth_auth_traps);
- Args.addOptInFlag(
- CmdArgs, options::OPT_fptrauth_vtable_pointer_address_discrimination,
- options::OPT_fno_ptrauth_vtable_pointer_address_discrimination);
- Args.addOptInFlag(
- CmdArgs, options::OPT_fptrauth_vtable_pointer_type_discrimination,
- options::OPT_fno_ptrauth_vtable_pointer_type_discrimination);
- Args.addOptInFlag(
- CmdArgs, options::OPT_fptrauth_type_info_vtable_pointer_discrimination,
- options::OPT_fno_ptrauth_type_info_vtable_pointer_discrimination);
- Args.addOptInFlag(
- CmdArgs, options::OPT_fptrauth_function_pointer_type_discrimination,
- options::OPT_fno_ptrauth_function_pointer_type_discrimination);
-
- Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_indirect_gotos,
- options::OPT_fno_ptrauth_indirect_gotos);
- Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_init_fini,
- options::OPT_fno_ptrauth_init_fini);
- Args.addOptInFlag(CmdArgs,
- options::OPT_fptrauth_init_fini_address_discrimination,
- options::OPT_fno_ptrauth_init_fini_address_discrimination);
+ if (Triple.isOSDarwin() ||
+ (Triple.isOSLinux() &&
+ Triple.getEnvironment() == llvm::Triple::PAuthTest)) {
+ Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_intrinsics,
+ options::OPT_fno_ptrauth_intrinsics);
+ Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_calls,
+ options::OPT_fno_ptrauth_calls);
+ Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_returns,
+ options::OPT_fno_ptrauth_returns);
+ Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_auth_traps,
+ options::OPT_fno_ptrauth_auth_traps);
+ Args.addOptInFlag(
+ CmdArgs, options::OPT_fptrauth_vtable_pointer_address_discrimination,
+ options::OPT_fno_ptrauth_vtable_pointer_address_discrimination);
+ Args.addOptInFlag(
+ CmdArgs, options::OPT_fptrauth_vtable_pointer_type_discrimination,
+ options::OPT_fno_ptrauth_vtable_pointer_type_discrimination);
+ Args.addOptInFlag(
+ CmdArgs, options::OPT_fptrauth_type_info_vtable_pointer_discrimination,
+ options::OPT_fno_ptrauth_type_info_vtable_pointer_discrimination);
+ Args.addOptInFlag(
+ CmdArgs, options::OPT_fptrauth_function_pointer_type_discrimination,
+ options::OPT_fno_ptrauth_function_pointer_type_discrimination);
+ Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_indirect_gotos,
+ options::OPT_fno_ptrauth_indirect_gotos);
+ }
+ if (Triple.isOSLinux() &&
+ Triple.getEnvironment() == llvm::Triple::PAuthTest) {
+ Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_init_fini,
+ options::OPT_fno_ptrauth_init_fini);
+ Args.addOptInFlag(
+ CmdArgs, options::OPT_fptrauth_init_fini_address_discrimination,
+ options::OPT_fno_ptrauth_init_fini_address_discrimination);
+ Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_elf_got,
+ options::OPT_fno_ptrauth_elf_got);
+ }
Args.addOptInFlag(CmdArgs, options::OPT_faarch64_jump_table_hardening,
options::OPT_fno_aarch64_jump_table_hardening);
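The pointer-authentication flags are thus forwarded only where they are expected to work: the common set on Darwin and on Linux with the PAuthTest environment, and the ELF-specific ones (-fptrauth-init-fini, -fptrauth-init-fini-address-discrimination, -fptrauth-elf-got) only on the PAuthTest Linux configuration. -faarch64-jump-table-hardening stays unconditional.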
diff --git a/clang/lib/Driver/ToolChains/Fuchsia.cpp b/clang/lib/Driver/ToolChains/Fuchsia.cpp
index 31c2f3f..507cc03 100644
--- a/clang/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/clang/lib/Driver/ToolChains/Fuchsia.cpp
@@ -483,7 +483,8 @@ SanitizerMask Fuchsia::getSupportedSanitizers() const {
Res |= SanitizerKind::Leak;
Res |= SanitizerKind::Scudo;
Res |= SanitizerKind::Thread;
- if (getTriple().getArch() == llvm::Triple::x86_64) {
+ if (getTriple().getArch() == llvm::Triple::x86_64 ||
+ getTriple().getArch() == llvm::Triple::x86) {
Res |= SanitizerKind::SafeStack;
}
return Res;
@@ -496,6 +497,7 @@ SanitizerMask Fuchsia::getDefaultSanitizers() const {
case llvm::Triple::riscv64:
Res |= SanitizerKind::ShadowCallStack;
break;
+ case llvm::Triple::x86:
case llvm::Triple::x86_64:
Res |= SanitizerKind::SafeStack;
break;
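SafeStack on Fuchsia is thereby both supported and enabled by default for 32-bit x86, matching the existing x86_64 behavior.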
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 1d0dfd0b..021d8c6 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -2674,8 +2674,11 @@ private:
}
// *a or &a or &&a.
- if (PreviousNotConst->is(TT_PointerOrReference))
+ if (PreviousNotConst->is(TT_PointerOrReference) ||
+ PreviousNotConst->endsSequence(tok::coloncolon,
+ TT_PointerOrReference)) {
return true;
+ }
// MyClass a;
if (PreviousNotConst->isTypeName(LangOpts))
diff --git a/clang/lib/Frontend/TextDiagnostic.cpp b/clang/lib/Frontend/TextDiagnostic.cpp
index 5888571..f5add2a 100644
--- a/clang/lib/Frontend/TextDiagnostic.cpp
+++ b/clang/lib/Frontend/TextDiagnostic.cpp
@@ -17,7 +17,6 @@
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Locale.h"
-#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <optional>
@@ -662,7 +661,7 @@ void TextDiagnostic::emitDiagnosticMessage(
FullSourceLoc Loc, PresumedLoc PLoc, DiagnosticsEngine::Level Level,
StringRef Message, ArrayRef<clang::CharSourceRange> Ranges,
DiagOrStoredDiag D) {
- uint64_t StartOfLocationInfo = OS.tell();
+ uint64_t StartOfLocationInfo = OS.getColumn();
// Emit the location of this particular diagnostic.
if (Loc.isValid())
@@ -675,8 +674,11 @@ void TextDiagnostic::emitDiagnosticMessage(
printDiagnosticLevel(OS, Level, DiagOpts.ShowColors);
printDiagnosticMessage(OS,
/*IsSupplemental*/ Level == DiagnosticsEngine::Note,
- Message, OS.tell() - StartOfLocationInfo,
+ Message, OS.getColumn() - StartOfLocationInfo,
DiagOpts.MessageLength, DiagOpts.ShowColors);
+ // We use a formatted ostream, which does its own buffering. Flush here
+ // so we keep the proper order of output.
+ OS.flush();
}
/*static*/ void
@@ -1485,7 +1487,7 @@ void TextDiagnostic::emitSnippet(StringRef SourceLine,
if (CharStyle != Styles.end()) {
if (!CurrentColor ||
(CurrentColor && *CurrentColor != CharStyle->Color)) {
- OS.changeColor(CharStyle->Color, false);
+ OS.changeColor(CharStyle->Color);
CurrentColor = CharStyle->Color;
}
} else if (CurrentColor) {
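The switch from tell() to getColumn() matters because the stream here is a formatted stream with its own buffer: tell() on the wrapped stream misses bytes that have not been flushed yet, while the column count tracks everything written. A hedged standalone sketch of the distinction (file name and message illustrative):

    #include "llvm/Support/FormattedStream.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      // formatted_raw_ostream tracks the column across its internal buffer.
      llvm::formatted_raw_ostream FOS(llvm::outs());
      FOS << "file.c:1:2: ";
      unsigned Indent = FOS.getColumn(); // width of the location prefix
      FOS << "error: example message\n";
      (void)Indent;
      FOS.flush(); // keep ordering with any direct writes to llvm::outs()
      return 0;
    }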
diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h
index 0fcfe37..263a107 100644
--- a/clang/lib/Headers/avx512vlbwintrin.h
+++ b/clang/lib/Headers/avx512vlbwintrin.h
@@ -2385,22 +2385,19 @@ _mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A)
(__mmask32) __U);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
-_mm_test_epi8_mask (__m128i __A, __m128i __B)
-{
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_test_epi8_mask(__m128i __A, __m128i __B) {
return _mm_cmpneq_epi8_mask (_mm_and_si128(__A, __B), _mm_setzero_si128());
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
-_mm_mask_test_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
-{
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_test_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
return _mm_mask_cmpneq_epi8_mask (__U, _mm_and_si128 (__A, __B),
_mm_setzero_si128());
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
-_mm256_test_epi8_mask (__m256i __A, __m256i __B)
-{
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_test_epi8_mask(__m256i __A, __m256i __B) {
return _mm256_cmpneq_epi8_mask (_mm256_and_si256(__A, __B),
_mm256_setzero_si256());
}
@@ -2439,9 +2436,8 @@ _mm256_mask_test_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
_mm256_setzero_si256());
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
-_mm_testn_epi8_mask (__m128i __A, __m128i __B)
-{
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_testn_epi8_mask(__m128i __A, __m128i __B) {
return _mm_cmpeq_epi8_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
}
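With the _CONSTEXPR attribute variants, these mask tests can fold at compile time. A hedged usage sketch, assuming the intrinsics it composes (_mm_set1_epi8 and the compares) are also constexpr in the same configuration, built with AVX-512BW/VL enabled:

    #include <immintrin.h>

    // 1 & 3 == 1 in every byte lane, so the not-equal-to-zero test sets all
    // 16 mask bits. Assumes a C++ build where these wrappers are constexpr.
    constexpr __mmask16 AllLanes =
        _mm_test_epi8_mask(_mm_set1_epi8(1), _mm_set1_epi8(3));
    static_assert(AllLanes == 0xFFFF, "every lane is nonzero");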
diff --git a/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h
index d973371..a918af3 100644
--- a/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h
+++ b/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h
@@ -2598,6 +2598,129 @@ _HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_max)
__attribute__((convergent)) double4 WaveActiveMax(double4);
//===----------------------------------------------------------------------===//
+// WaveActiveMin builtins
+//===----------------------------------------------------------------------===//
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) half WaveActiveMin(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) half2 WaveActiveMin(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) half3 WaveActiveMin(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) half4 WaveActiveMin(half4);
+
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) int16_t WaveActiveMin(int16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) int16_t2 WaveActiveMin(int16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) int16_t3 WaveActiveMin(int16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) int16_t4 WaveActiveMin(int16_t4);
+
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) uint16_t WaveActiveMin(uint16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) uint16_t2 WaveActiveMin(uint16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) uint16_t3 WaveActiveMin(uint16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) uint16_t4 WaveActiveMin(uint16_t4);
+#endif
+
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) int WaveActiveMin(int);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) int2 WaveActiveMin(int2);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) int3 WaveActiveMin(int3);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) int4 WaveActiveMin(int4);
+
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) uint WaveActiveMin(uint);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) uint2 WaveActiveMin(uint2);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) uint3 WaveActiveMin(uint3);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) uint4 WaveActiveMin(uint4);
+
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) int64_t WaveActiveMin(int64_t);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) int64_t2 WaveActiveMin(int64_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) int64_t3 WaveActiveMin(int64_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) int64_t4 WaveActiveMin(int64_t4);
+
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) uint64_t WaveActiveMin(uint64_t);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) uint64_t2 WaveActiveMin(uint64_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) uint64_t3 WaveActiveMin(uint64_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) uint64_t4 WaveActiveMin(uint64_t4);
+
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) float WaveActiveMin(float);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) float2 WaveActiveMin(float2);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) float3 WaveActiveMin(float3);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) float4 WaveActiveMin(float4);
+
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) double WaveActiveMin(double);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) double2 WaveActiveMin(double2);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) double3 WaveActiveMin(double3);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_min)
+__attribute__((convergent)) double4 WaveActiveMin(double4);
+
+//===----------------------------------------------------------------------===//
// WaveActiveSum builtins
//===----------------------------------------------------------------------===//
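Usage mirrors the other wave reductions: inside a shader, `float m = WaveActiveMin(v);` yields the minimum of `v` across the active lanes of the wave, with the scalar and vector overloads above selecting the element type.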
diff --git a/clang/lib/Interpreter/InterpreterValuePrinter.cpp b/clang/lib/Interpreter/InterpreterValuePrinter.cpp
index 0ed02f3..cfa50ee 100644
--- a/clang/lib/Interpreter/InterpreterValuePrinter.cpp
+++ b/clang/lib/Interpreter/InterpreterValuePrinter.cpp
@@ -411,7 +411,8 @@ public:
}
InterfaceKind VisitReferenceType(const ReferenceType *Ty) {
- ExprResult AddrOfE = S.CreateBuiltinUnaryOp(SourceLocation(), UO_AddrOf, E);
+ ExprResult AddrOfE = S.CreateBuiltinUnaryOp(SourceLocation(), UO_AddrOf,
+ E->IgnoreImpCasts());
assert(!AddrOfE.isInvalid() && "Can not create unary expression");
Args.push_back(AddrOfE.get());
return InterfaceKind::NoAlloc;
@@ -537,7 +538,7 @@ llvm::Expected<Expr *> Interpreter::convertExprToValue(Expr *E) {
QualType DesugaredTy = Ty.getDesugaredType(Ctx);
// For lvalue struct, we treat it as a reference.
- if (DesugaredTy->isRecordType() && E->isLValue()) {
+ if (DesugaredTy->isRecordType() && E->IgnoreImpCasts()->isLValue()) {
DesugaredTy = Ctx.getLValueReferenceType(DesugaredTy);
Ty = Ctx.getLValueReferenceType(Ty);
}
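Both call sites now look through implicit casts, so a record-typed lvalue that the AST has wrapped in an ImplicitCastExpr is still recognized as an lvalue: it gets the reference treatment and is passed by address rather than copied.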
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index f04cc45..fb4d0b45 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -2408,11 +2408,16 @@ const NormalizedConstraint *Sema::getNormalizedAssociatedConstraints(
if (CacheEntry == NormalizationCache.end()) {
auto *Normalized = NormalizedConstraint::fromAssociatedConstraints(
*this, ND, AssociatedConstraints);
+ if (!Normalized) {
+ NormalizationCache.try_emplace(ConstrainedDeclOrNestedReq, nullptr);
+ return nullptr;
+ }
+ // substitute() can invalidate iterators of NormalizationCache.
+ bool Failed = SubstituteParameterMappings(*this).substitute(*Normalized);
CacheEntry =
NormalizationCache.try_emplace(ConstrainedDeclOrNestedReq, Normalized)
.first;
- if (!Normalized ||
- SubstituteParameterMappings(*this).substitute(*Normalized))
+ if (Failed)
return nullptr;
}
return CacheEntry->second;
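The reordering avoids a classic DenseMap hazard: substitute() may itself insert into NormalizationCache and trigger a rehash, invalidating any iterator captured beforehand. A minimal standalone sketch of the safe pattern (types and names illustrative, not the Sema code):

    #include "llvm/ADT/DenseMap.h"

    static llvm::DenseMap<int, int> Cache;

    // May insert into Cache itself (re-entrant), which can rehash it and
    // invalidate live iterators.
    static int computeAndMaybeCacheOthers(int K) {
      if (K > 0)
        Cache.try_emplace(K - 1, (K - 1) * 2);
      return K * 2;
    }

    int lookup(int K) {
      if (auto It = Cache.find(K); It != Cache.end())
        return It->second;
      int V = computeAndMaybeCacheOthers(K); // run the re-entrant step first...
      // ...and only then insert and keep a handle to the cache entry.
      return Cache.try_emplace(K, V).first->second;
    }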
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 96d5142..94a490a 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -3279,6 +3279,7 @@ bool SemaHLSL::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
break;
}
case Builtin::BI__builtin_hlsl_wave_active_max:
+ case Builtin::BI__builtin_hlsl_wave_active_min:
case Builtin::BI__builtin_hlsl_wave_active_sum: {
if (SemaRef.checkArgCount(TheCall, 1))
return true;