Diffstat (limited to 'clang/lib')

 clang/lib/AST/ByteCode/Interp.cpp                                         | 32
 clang/lib/AST/ByteCode/InterpBuiltin.cpp                                  | 83
 clang/lib/AST/ExprConstant.cpp                                            | 83
 clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp  | 89
 clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp                           |  4
 clang/lib/CodeGen/BackendUtil.cpp                                         | 36
 clang/lib/CodeGen/CGBuiltin.cpp                                           |  9
 clang/lib/Driver/ToolChains/Flang.cpp                                     |  7
 clang/lib/Format/WhitespaceManager.cpp                                    | 25
 clang/lib/Headers/avx512vlbwintrin.h                                      | 20
 clang/lib/Sema/SemaConcept.cpp                                            |  9

 11 files changed, 344 insertions(+), 53 deletions(-)
diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp
index d640be0..a2fb0fb 100644
--- a/clang/lib/AST/ByteCode/Interp.cpp
+++ b/clang/lib/AST/ByteCode/Interp.cpp
@@ -1651,8 +1651,8 @@ static bool GetDynamicDecl(InterpState &S, CodePtr OpPC, Pointer TypePtr,
   QualType DynamicType = TypePtr.getType();
 
   if (TypePtr.isStatic() || TypePtr.isConst()) {
-    const VarDecl *VD = TypePtr.getDeclDesc()->asVarDecl();
-    if (!VD->isConstexpr()) {
+    if (const VarDecl *VD = TypePtr.getDeclDesc()->asVarDecl();
+        VD && !VD->isConstexpr()) {
       const Expr *E = S.Current->getExpr(OpPC);
       APValue V = TypePtr.toAPValue(S.getASTContext());
       QualType TT = S.getASTContext().getLValueReferenceType(DynamicType);
@@ -1683,20 +1683,6 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
   Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset);
 
   const FunctionDecl *Callee = Func->getDecl();
-  if (!Func->isFullyCompiled())
-    compileFunction(S, Func);
-
-  // C++2a [class.abstract]p6:
-  //   the effect of making a virtual call to a pure virtual function [...] is
-  //   undefined
-  if (Callee->isPureVirtual()) {
-    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_pure_virtual_call,
-             1)
-        << Callee;
-    S.Note(Callee->getLocation(), diag::note_declared_at);
-    return false;
-  }
-
   const CXXRecordDecl *DynamicDecl = nullptr;
   if (!GetDynamicDecl(S, OpPC, ThisPtr, DynamicDecl))
     return false;
@@ -1706,7 +1692,8 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
   const auto *InitialFunction = cast<CXXMethodDecl>(Callee);
   const CXXMethodDecl *Overrider;
 
-  if (StaticDecl != DynamicDecl) {
+  if (StaticDecl != DynamicDecl &&
+      !llvm::is_contained(S.InitializingBlocks, ThisPtr.block())) {
     if (!DynamicDecl->isDerivedFrom(StaticDecl))
       return false;
     Overrider = S.getContext().getOverridingFunction(DynamicDecl, StaticDecl,
@@ -1716,6 +1703,17 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
     Overrider = InitialFunction;
   }
 
+  // C++2a [class.abstract]p6:
+  //   the effect of making a virtual call to a pure virtual function [...] is
+  //   undefined
+  if (Overrider->isPureVirtual()) {
+    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_pure_virtual_call,
+             1)
+        << Callee;
+    S.Note(Callee->getLocation(), diag::note_declared_at);
+    return false;
+  }
+
   if (Overrider != InitialFunction) {
     // DR1872: An instantiated virtual constexpr function can't be called in a
     // constant expression (prior to C++20). We can still constant-fold such a
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 8f23001..ab6b3ed 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3296,6 +3296,60 @@ static bool interp__builtin_vec_set(InterpState &S, CodePtr OpPC,
   return true;
 }
 
+static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B,
+                        bool IsUnsigned) {
+  switch (Imm & 0x7) {
+  case 0x00: // _MM_CMPINT_EQ
+    return (A == B);
+  case 0x01: // _MM_CMPINT_LT
+    return IsUnsigned ? A.ult(B) : A.slt(B);
+  case 0x02: // _MM_CMPINT_LE
+    return IsUnsigned ? A.ule(B) : A.sle(B);
+  case 0x03: // _MM_CMPINT_FALSE
+    return false;
+  case 0x04: // _MM_CMPINT_NE
+    return (A != B);
+  case 0x05: // _MM_CMPINT_NLT (>=)
+    return IsUnsigned ? A.uge(B) : A.sge(B);
+  case 0x06: // _MM_CMPINT_NLE (>)
+    return IsUnsigned ? A.ugt(B) : A.sgt(B);
+  case 0x07: // _MM_CMPINT_TRUE
+    return true;
+  default:
+    llvm_unreachable("Invalid Op");
+  }
+}
+
+static bool interp__builtin_ia32_cmp_mask(InterpState &S, CodePtr OpPC,
+                                          const CallExpr *Call, unsigned ID,
+                                          bool IsUnsigned) {
+  assert(Call->getNumArgs() == 4);
+
+  APSInt Mask = popToAPSInt(S, Call->getArg(3));
+  APSInt Opcode = popToAPSInt(S, Call->getArg(2));
+  unsigned CmpOp = static_cast<unsigned>(Opcode.getZExtValue());
+  const Pointer &RHS = S.Stk.pop<Pointer>();
+  const Pointer &LHS = S.Stk.pop<Pointer>();
+
+  assert(LHS.getNumElems() == RHS.getNumElems());
+
+  APInt RetMask = APInt::getZero(LHS.getNumElems());
+  unsigned VectorLen = LHS.getNumElems();
+  PrimType ElemT = LHS.getFieldDesc()->getPrimType();
+
+  for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
+    APSInt A, B;
+    INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+      A = LHS.elem<T>(ElemNum).toAPSInt();
+      B = RHS.elem<T>(ElemNum).toAPSInt();
+    });
+    RetMask.setBitVal(ElemNum,
+                      Mask[ElemNum] && evalICmpImm(CmpOp, A, B, IsUnsigned));
+  }
+  pushInteger(S, RetMask, Call->getType());
+  return true;
+}
+
 static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
                                             const CallExpr *Call) {
   assert(Call->getNumArgs() == 1);
@@ -4488,6 +4542,35 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
   case X86::BI__builtin_ia32_vec_set_v4di:
     return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
 
+  case X86::BI__builtin_ia32_cmpb128_mask:
+  case X86::BI__builtin_ia32_cmpw128_mask:
+  case X86::BI__builtin_ia32_cmpd128_mask:
+  case X86::BI__builtin_ia32_cmpq128_mask:
+  case X86::BI__builtin_ia32_cmpb256_mask:
+  case X86::BI__builtin_ia32_cmpw256_mask:
+  case X86::BI__builtin_ia32_cmpd256_mask:
+  case X86::BI__builtin_ia32_cmpq256_mask:
+  case X86::BI__builtin_ia32_cmpb512_mask:
+  case X86::BI__builtin_ia32_cmpw512_mask:
+  case X86::BI__builtin_ia32_cmpd512_mask:
+  case X86::BI__builtin_ia32_cmpq512_mask:
+    return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
+                                         /*IsUnsigned=*/false);
+
+  case X86::BI__builtin_ia32_ucmpb128_mask:
+  case X86::BI__builtin_ia32_ucmpw128_mask:
+  case X86::BI__builtin_ia32_ucmpd128_mask:
+  case X86::BI__builtin_ia32_ucmpq128_mask:
+  case X86::BI__builtin_ia32_ucmpb256_mask:
+  case X86::BI__builtin_ia32_ucmpw256_mask:
+  case X86::BI__builtin_ia32_ucmpd256_mask:
+  case X86::BI__builtin_ia32_ucmpq256_mask:
+  case X86::BI__builtin_ia32_ucmpb512_mask:
+  case X86::BI__builtin_ia32_ucmpw512_mask:
+  case X86::BI__builtin_ia32_ucmpd512_mask:
+  case X86::BI__builtin_ia32_ucmpq512_mask:
+    return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
+                                         /*IsUnsigned=*/true);
+
   case X86::BI__builtin_ia32_pslldqi128_byteshift:
   case X86::BI__builtin_ia32_pslldqi256_byteshift:
   case X86::BI__builtin_ia32_pslldqi512_byteshift:
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 29ee089..d0404b9 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -15766,6 +15766,89 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
     unsigned Idx = static_cast<unsigned>(IdxAPS.getZExtValue() & (N - 1));
     return Success(Vec.getVectorElt(Idx).getInt(), E);
   }
+
+  case clang::X86::BI__builtin_ia32_cmpb128_mask:
+  case clang::X86::BI__builtin_ia32_cmpw128_mask:
+  case clang::X86::BI__builtin_ia32_cmpd128_mask:
+  case clang::X86::BI__builtin_ia32_cmpq128_mask:
+  case clang::X86::BI__builtin_ia32_cmpb256_mask:
+  case clang::X86::BI__builtin_ia32_cmpw256_mask:
+  case clang::X86::BI__builtin_ia32_cmpd256_mask:
+  case clang::X86::BI__builtin_ia32_cmpq256_mask:
+  case clang::X86::BI__builtin_ia32_cmpb512_mask:
+  case clang::X86::BI__builtin_ia32_cmpw512_mask:
+  case clang::X86::BI__builtin_ia32_cmpd512_mask:
+  case clang::X86::BI__builtin_ia32_cmpq512_mask:
+  case clang::X86::BI__builtin_ia32_ucmpb128_mask:
+  case clang::X86::BI__builtin_ia32_ucmpw128_mask:
+  case clang::X86::BI__builtin_ia32_ucmpd128_mask:
+  case clang::X86::BI__builtin_ia32_ucmpq128_mask:
+  case clang::X86::BI__builtin_ia32_ucmpb256_mask:
+  case clang::X86::BI__builtin_ia32_ucmpw256_mask:
+  case clang::X86::BI__builtin_ia32_ucmpd256_mask:
+  case clang::X86::BI__builtin_ia32_ucmpq256_mask:
+  case clang::X86::BI__builtin_ia32_ucmpb512_mask:
+  case clang::X86::BI__builtin_ia32_ucmpw512_mask:
+  case clang::X86::BI__builtin_ia32_ucmpd512_mask:
+  case clang::X86::BI__builtin_ia32_ucmpq512_mask: {
+    assert(E->getNumArgs() == 4);
+
+    bool IsUnsigned =
+        (BuiltinOp >= clang::X86::BI__builtin_ia32_ucmpb128_mask &&
+         BuiltinOp <= clang::X86::BI__builtin_ia32_ucmpq512_mask);
+
+    APValue LHS, RHS;
+    APSInt Mask, Opcode;
+    if (!EvaluateVector(E->getArg(0), LHS, Info) ||
+        !EvaluateVector(E->getArg(1), RHS, Info) ||
+        !EvaluateInteger(E->getArg(2), Opcode, Info) ||
+        !EvaluateInteger(E->getArg(3), Mask, Info))
+      return false;
+
+    assert(LHS.getVectorLength() == RHS.getVectorLength());
+
+    unsigned VectorLen = LHS.getVectorLength();
+    unsigned RetWidth = Mask.getBitWidth();
+
+    APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);
+
+    for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
+      const APSInt &A = LHS.getVectorElt(ElemNum).getInt();
+      const APSInt &B = RHS.getVectorElt(ElemNum).getInt();
+      bool Result = false;
+
+      switch (Opcode.getExtValue() & 0x7) {
+      case 0: // _MM_CMPINT_EQ
+        Result = (A == B);
+        break;
+      case 1: // _MM_CMPINT_LT
+        Result = IsUnsigned ? A.ult(B) : A.slt(B);
+        break;
+      case 2: // _MM_CMPINT_LE
+        Result = IsUnsigned ? A.ule(B) : A.sle(B);
+        break;
+      case 3: // _MM_CMPINT_FALSE
+        Result = false;
+        break;
+      case 4: // _MM_CMPINT_NE
+        Result = (A != B);
+        break;
+      case 5: // _MM_CMPINT_NLT (>=)
+        Result = IsUnsigned ? A.uge(B) : A.sge(B);
+        break;
+      case 6: // _MM_CMPINT_NLE (>)
+        Result = IsUnsigned ? A.ugt(B) : A.sgt(B);
+        break;
+      case 7: // _MM_CMPINT_TRUE
+        Result = true;
+        break;
+      }
+
+      RetMask.setBitVal(ElemNum, Mask[ElemNum] && Result);
+    }
+
+    return Success(APValue(RetMask), E);
+  }
   }
 }
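Both evaluators above implement the same `_MM_CMPINT_*` predicate table, so the AVX-512 masked integer compares can now fold in constant expressions. A small compile-time check of the kind this enables — my own sketch, not part of the commit; it assumes a C++17 clang with `-mavx512vl`, and builds the inputs as vector literals so it does not depend on the `_mm_set*` wrappers being constexpr:

```cpp
#include <immintrin.h>

// __v4si is the <immintrin.h>-internal typedef for a vector of four ints.
constexpr __m128i A = (__m128i)(__v4si){1, 2, 3, 4};
constexpr __m128i B = (__m128i)(__v4si){4, 3, 2, 1};

// Bit i of the result is (write-mask bit i) && (A[i] < B[i]).
// A[0] < B[0] and A[1] < B[1] hold, but element 0 is masked off.
constexpr __mmask8 K = _mm_mask_cmp_epi32_mask(0b1110, A, B, _MM_CMPINT_LT);
static_assert(K == 0b0010, "only the unmasked less-than lane survives");
```

`_mm_mask_cmp_epi32_mask` expands to `__builtin_ia32_cmpd128_mask`, one of the builtins the two hunks above teach the evaluators, so the whole expression folds at compile time.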
diff --git a/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp b/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
index 90551c2..b42bfa3 100644
--- a/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
+++ b/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
@@ -177,6 +177,41 @@ static auto isPointerComparisonOperatorCall(std::string operator_name) {
                    pointee(anyOf(statusOrType(), statusType())))))));
 }
 
+// The nullPointerConstant in the two matchers below is to support
+// absl::StatusOr<void*> X = nullptr;
+// nullptr does not match the bound type.
+// TODO: be less restrictive around convertible types in general.
+static auto isStatusOrValueAssignmentCall() {
+  using namespace ::clang::ast_matchers;  // NOLINT: Too many names
+  return cxxOperatorCallExpr(
+      hasOverloadedOperatorName("="),
+      callee(cxxMethodDecl(ofClass(statusOrClass()))),
+      hasArgument(1, anyOf(hasType(hasUnqualifiedDesugaredType(
+                               type(equalsBoundNode("T")))),
+                           nullPointerConstant())));
+}
+
+static auto isStatusOrValueConstructor() {
+  using namespace ::clang::ast_matchers;  // NOLINT: Too many names
+  return cxxConstructExpr(
+      hasType(statusOrType()),
+      hasArgument(0,
+                  anyOf(hasType(hasCanonicalType(type(equalsBoundNode("T")))),
+                        nullPointerConstant(),
+                        hasType(namedDecl(hasAnyName("absl::in_place_t",
+                                                     "std::in_place_t"))))));
+}
+
+static auto isStatusOrConstructor() {
+  using namespace ::clang::ast_matchers;  // NOLINT: Too many names
+  return cxxConstructExpr(hasType(statusOrType()));
+}
+
+static auto isStatusConstructor() {
+  using namespace ::clang::ast_matchers;  // NOLINT: Too many names
+  return cxxConstructExpr(hasType(statusType()));
+}
+
 static auto
 buildDiagnoseMatchSwitch(const UncheckedStatusOrAccessModelOptions &Options) {
   return CFGMatchSwitchBuilder<const Environment,
@@ -528,6 +563,46 @@ static void transferEmplaceCall(const CXXMemberCallExpr *Expr,
   State.Env.assume(OkVal.formula());
 }
 
+static void transferValueAssignmentCall(const CXXOperatorCallExpr *Expr,
+                                        const MatchFinder::MatchResult &,
+                                        LatticeTransferState &State) {
+  assert(Expr->getNumArgs() > 1);
+
+  auto *StatusOrLoc = State.Env.get<RecordStorageLocation>(*Expr->getArg(0));
+  if (StatusOrLoc == nullptr)
+    return;
+
+  auto &OkVal = initializeStatusOr(*StatusOrLoc, State.Env);
+  State.Env.assume(OkVal.formula());
+}
+
+static void transferValueConstructor(const CXXConstructExpr *Expr,
+                                     const MatchFinder::MatchResult &,
+                                     LatticeTransferState &State) {
+  auto &OkVal =
+      initializeStatusOr(State.Env.getResultObjectLocation(*Expr), State.Env);
+  State.Env.assume(OkVal.formula());
+}
+
+static void transferStatusOrConstructor(const CXXConstructExpr *Expr,
+                                        const MatchFinder::MatchResult &,
+                                        LatticeTransferState &State) {
+  RecordStorageLocation &StatusOrLoc = State.Env.getResultObjectLocation(*Expr);
+  RecordStorageLocation &StatusLoc = locForStatus(StatusOrLoc);
+
+  if (State.Env.getValue(locForOk(StatusLoc)) == nullptr)
+    initializeStatusOr(StatusOrLoc, State.Env);
+}
+
+static void transferStatusConstructor(const CXXConstructExpr *Expr,
+                                      const MatchFinder::MatchResult &,
+                                      LatticeTransferState &State) {
+  RecordStorageLocation &StatusLoc = State.Env.getResultObjectLocation(*Expr);
+
+  if (State.Env.getValue(locForOk(StatusLoc)) == nullptr)
+    initializeStatus(StatusLoc, State.Env);
+}
+
 CFGMatchSwitch<LatticeTransferState>
 buildTransferMatchSwitch(ASTContext &Ctx,
                          CFGMatchSwitchBuilder<LatticeTransferState> Builder) {
@@ -573,6 +648,20 @@ buildTransferMatchSwitch(ASTContext &Ctx,
       .CaseOfCFGStmt<CallExpr>(isNotOkStatusCall(), transferNotOkStatusCall)
       .CaseOfCFGStmt<CXXMemberCallExpr>(isStatusOrMemberCallWithName("emplace"),
                                         transferEmplaceCall)
+      .CaseOfCFGStmt<CXXOperatorCallExpr>(isStatusOrValueAssignmentCall(),
+                                          transferValueAssignmentCall)
+      .CaseOfCFGStmt<CXXConstructExpr>(isStatusOrValueConstructor(),
+                                       transferValueConstructor)
+      // N.B. These need to come after all other CXXConstructExpr cases.
+      // They are there to make sure that every Status and StatusOr object
+      // has its ok boolean initialized when constructed. If we were to
+      // lazily initialize it on first access, we could produce false
+      // positives when that first access happens inside a control-flow
+      // statement. Commenting out these two cases makes tests fail.
+      .CaseOfCFGStmt<CXXConstructExpr>(isStatusOrConstructor(),
+                                       transferStatusOrConstructor)
+      .CaseOfCFGStmt<CXXConstructExpr>(isStatusConstructor(),
+                                       transferStatusConstructor)
       .Build();
 }
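For orientation, this is the kind of code the new matchers and transfer functions are meant to bless — a hypothetical test-style input (using `absl::StatusOr` as the model's tests do), not code from the commit:

```cpp
#include "absl/status/statusor.h"

int use_after_value_assignment(absl::StatusOr<int> sor) {
  sor = 42;     // isStatusOrValueAssignmentCall: ok() is now assumed true,
  return *sor;  // so this access needs no explicit ok() check.
}

void *value_constructed() {
  // nullPointerConstant() in the matcher keeps StatusOr<void*> = nullptr
  // recognized as value construction, which likewise implies ok().
  absl::StatusOr<void *> p = nullptr;
  return *p;
}
```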
diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
index 21c96fe..ca7554e 100644
--- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
@@ -606,10 +606,12 @@ public:
     //    `cir.try_call`.
     llvm::SmallVector<cir::CallOp, 4> callsToRewrite;
     tryOp.getTryRegion().walk([&](CallOp op) {
+      if (op.getNothrow())
+        return;
+
       // Only grab calls within immediate closest TryOp scope.
       if (op->getParentOfType<cir::TryOp>() != tryOp)
         return;
 
-      assert(!cir::MissingFeatures::opCallExceptionAttr());
       callsToRewrite.push_back(op);
     });
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 468c930..3c31314 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -713,14 +713,16 @@ static void addSanitizers(const Triple &TargetTriple,
                           ThinOrFullLTOPhase) {
     if (CodeGenOpts.hasSanitizeCoverage()) {
       auto SancovOpts = getSancovOptsFromCGOpts(CodeGenOpts);
-      MPM.addPass(SanitizerCoveragePass(
-          SancovOpts, CodeGenOpts.SanitizeCoverageAllowlistFiles,
-          CodeGenOpts.SanitizeCoverageIgnorelistFiles));
+      MPM.addPass(
+          SanitizerCoveragePass(SancovOpts, PB.getVirtualFileSystemPtr(),
+                                CodeGenOpts.SanitizeCoverageAllowlistFiles,
+                                CodeGenOpts.SanitizeCoverageIgnorelistFiles));
     }
 
     if (CodeGenOpts.hasSanitizeBinaryMetadata()) {
       MPM.addPass(SanitizerBinaryMetadataPass(
           getSanitizerBinaryMetadataOptions(CodeGenOpts),
+          PB.getVirtualFileSystemPtr(),
           CodeGenOpts.SanitizeMetadataIgnorelistFiles));
     }
 
@@ -798,16 +800,6 @@ static void addSanitizers(const Triple &TargetTriple,
       MPM.addPass(DataFlowSanitizerPass(LangOpts.NoSanitizeFiles,
                                         PB.getVirtualFileSystemPtr()));
     }
-
-    if (LangOpts.Sanitize.has(SanitizerKind::AllocToken)) {
-      if (Level == OptimizationLevel::O0) {
-        // The default pass builder only infers libcall function attrs when
-        // optimizing, so we insert it here because we need it for accurate
-        // memory allocation function detection.
-        MPM.addPass(InferFunctionAttrsPass());
-      }
-      MPM.addPass(AllocTokenPass(getAllocTokenOptions(LangOpts, CodeGenOpts)));
-    }
   };
   if (ClSanitizeOnOptimizerEarlyEP) {
     PB.registerOptimizerEarlyEPCallback(
@@ -850,6 +842,23 @@ static void addSanitizers(const Triple &TargetTriple,
   }
 }
 
+static void addAllocTokenPass(const Triple &TargetTriple,
+                              const CodeGenOptions &CodeGenOpts,
+                              const LangOptions &LangOpts, PassBuilder &PB) {
+  PB.registerOptimizerLastEPCallback([&](ModulePassManager &MPM,
+                                         OptimizationLevel Level,
+                                         ThinOrFullLTOPhase) {
+    if (Level == OptimizationLevel::O0 &&
+        LangOpts.Sanitize.has(SanitizerKind::AllocToken)) {
+      // The default pass builder only infers libcall function attrs when
+      // optimizing, so we insert it here because we need it for accurate
+      // memory allocation function detection with -fsanitize=alloc-token.
+      MPM.addPass(InferFunctionAttrsPass());
+    }
+    MPM.addPass(AllocTokenPass(getAllocTokenOptions(LangOpts, CodeGenOpts)));
+  });
+}
+
 void EmitAssemblyHelper::RunOptimizationPipeline(
     BackendAction Action, std::unique_ptr<raw_pwrite_stream> &OS,
     std::unique_ptr<llvm::ToolOutputFile> &ThinLinkOS, BackendConsumer *BC) {
@@ -1104,6 +1113,7 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
   if (!IsThinLTOPostLink) {
     addSanitizers(TargetTriple, CodeGenOpts, LangOpts, PB);
     addKCFIPass(TargetTriple, LangOpts, PB);
+    addAllocTokenPass(TargetTriple, CodeGenOpts, LangOpts, PB);
   }
 
   if (std::optional<GCOVOptions> Options =
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index fd14cd6..b81e0d0 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -4506,6 +4506,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
     return RValue::get(AI);
   }
 
+  case Builtin::BI__builtin_infer_alloc_token: {
+    llvm::MDNode *MDN = buildAllocToken(E);
+    llvm::Value *MDV = MetadataAsValue::get(getLLVMContext(), MDN);
+    llvm::Function *F =
+        CGM.getIntrinsic(llvm::Intrinsic::alloc_token_id, {IntPtrTy});
+    llvm::CallBase *TokenID = Builder.CreateCall(F, MDV);
+    return RValue::get(TokenID);
+  }
+
   case Builtin::BIbzero:
   case Builtin::BI__builtin_bzero: {
     Address Dest = EmitPointerWithAlignment(E->getArg(0));
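The BackendUtil.cpp and CGBuiltin.cpp hunks are paired: `__builtin_infer_alloc_token` lowers to the `alloc_token_id` intrinsic, and the `AllocTokenPass` is now registered unconditionally at the optimizer-last extension point so that intrinsic is always lowered, even without `-fsanitize=alloc-token`. A hedged usage sketch — the token-aware allocator below is hypothetical; only the builtin itself comes from this commit:

```cpp
#include <cstddef>

// Hypothetical allocator that partitions heap memory by token.
extern "C" void *my_token_alloc(std::size_t token, std::size_t size);

struct Node {
  Node *next;
  int value;
};

Node *make_node() {
  // The builtin is given malloc-style arguments and evaluates to the token
  // the alloc-token instrumentation would assign to such an allocation site.
  std::size_t tok = __builtin_infer_alloc_token(sizeof(Node));
  return static_cast<Node *>(my_token_alloc(tok, sizeof(Node)));
}
```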
diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp
index a56fa41..88bce18 100644
--- a/clang/lib/Driver/ToolChains/Flang.cpp
+++ b/clang/lib/Driver/ToolChains/Flang.cpp
@@ -945,6 +945,13 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
     assert(false && "Unexpected action class for Flang tool.");
   }
 
+  // We support some options that are invalid for Fortran and have no effect.
+  // These are accepted solely for compatibility with other compilers. Emit a
+  // warning if any such option is provided, then proceed normally.
+  for (options::ID Opt : {options::OPT_fbuiltin, options::OPT_fno_builtin})
+    if (const Arg *A = Args.getLastArg(Opt))
+      D.Diag(diag::warn_drv_invalid_argument_for_flang) << A->getSpelling();
+
   const InputInfo &Input = Inputs[0];
   types::ID InputType = Input.getType();
diff --git a/clang/lib/Format/WhitespaceManager.cpp b/clang/lib/Format/WhitespaceManager.cpp
index 65fc65e..f24b8ab 100644
--- a/clang/lib/Format/WhitespaceManager.cpp
+++ b/clang/lib/Format/WhitespaceManager.cpp
@@ -288,6 +288,9 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
                    ArrayRef<unsigned> Matches,
                    SmallVector<WhitespaceManager::Change, 16> &Changes) {
   int Shift = 0;
+  // Set when the shift is applied anywhere in the line. Cleared when the line
+  // ends.
+  bool LineShifted = false;
 
   // ScopeStack keeps track of the current scope depth. It contains the levels
   // of at most 2 scopes. The first one is the one that the matched token is
@@ -339,8 +342,11 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
         Changes[i - 1].Tok->is(tok::string_literal);
     bool SkipMatchCheck = InsideNestedScope || ContinuedStringLiteral;
 
-    if (CurrentChange.NewlinesBefore > 0 && !SkipMatchCheck)
-      Shift = 0;
+    if (CurrentChange.NewlinesBefore > 0) {
+      LineShifted = false;
+      if (!SkipMatchCheck)
+        Shift = 0;
+    }
 
     // If this is the first matching token to be aligned, remember by how many
     // spaces it has to be shifted, so the rest of the changes on the line are
@@ -349,7 +355,6 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
       Shift = Column - (RightJustify ? CurrentChange.TokenLength : 0) -
               CurrentChange.StartOfTokenColumn;
       ScopeStack = {CurrentChange.indentAndNestingLevel()};
-      CurrentChange.Spaces += Shift;
     }
 
     if (Shift == 0)
@@ -358,8 +363,10 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
     // This is for lines that are split across multiple lines, as mentioned in
     // the ScopeStack comment. The stack size being 1 means that the token is
     // not in a scope that should not move.
-    if (ScopeStack.size() == 1u && CurrentChange.NewlinesBefore > 0 &&
-        (ContinuedStringLiteral || InsideNestedScope)) {
+    if ((!Matches.empty() && Matches[0] == i) ||
+        (ScopeStack.size() == 1u && CurrentChange.NewlinesBefore > 0 &&
+         (ContinuedStringLiteral || InsideNestedScope))) {
+      LineShifted = true;
       CurrentChange.Spaces += Shift;
     }
 
@@ -369,9 +376,11 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
            static_cast<int>(Changes[i].Tok->SpacesRequiredBefore) ||
        CurrentChange.Tok->is(tok::eof));
 
-    CurrentChange.StartOfTokenColumn += Shift;
-    if (i + 1 != Changes.size())
-      Changes[i + 1].PreviousEndOfTokenColumn += Shift;
+    if (LineShifted) {
+      CurrentChange.StartOfTokenColumn += Shift;
+      if (i + 1 != Changes.size())
+        Changes[i + 1].PreviousEndOfTokenColumn += Shift;
+    }
 
     // If PointerAlignment is PAS_Right, keep *s or &s next to the token,
     // except if the token is equal, then a space is needed.
diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h
index 0fcfe37..263a107 100644
--- a/clang/lib/Headers/avx512vlbwintrin.h
+++ b/clang/lib/Headers/avx512vlbwintrin.h
@@ -2385,22 +2385,19 @@ _mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A)
                                                  (__mmask32) __U);
 }
 
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
-_mm_test_epi8_mask (__m128i __A, __m128i __B)
-{
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_test_epi8_mask(__m128i __A, __m128i __B) {
   return _mm_cmpneq_epi8_mask (_mm_and_si128(__A, __B), _mm_setzero_si128());
 }
 
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
-_mm_mask_test_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
-{
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_mask_test_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
   return _mm_mask_cmpneq_epi8_mask (__U, _mm_and_si128 (__A, __B),
                                     _mm_setzero_si128());
 }
 
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
-_mm256_test_epi8_mask (__m256i __A, __m256i __B)
-{
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_test_epi8_mask(__m256i __A, __m256i __B) {
   return _mm256_cmpneq_epi8_mask (_mm256_and_si256(__A, __B),
                                   _mm256_setzero_si256());
 }
@@ -2439,9 +2436,8 @@ _mm256_mask_test_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
                                                  _mm256_setzero_si256());
 }
 
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
-_mm_testn_epi8_mask (__m128i __A, __m128i __B)
-{
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
+_mm_testn_epi8_mask(__m128i __A, __m128i __B) {
   return _mm_cmpeq_epi8_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
 }
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index f04cc45..fb4d0b45 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -2408,11 +2408,16 @@ const NormalizedConstraint *Sema::getNormalizedAssociatedConstraints(
   if (CacheEntry == NormalizationCache.end()) {
     auto *Normalized = NormalizedConstraint::fromAssociatedConstraints(
         *this, ND, AssociatedConstraints);
+    if (!Normalized) {
+      NormalizationCache.try_emplace(ConstrainedDeclOrNestedReq, nullptr);
+      return nullptr;
+    }
+    // substitute() can invalidate iterators of NormalizationCache.
+    bool Failed = SubstituteParameterMappings(*this).substitute(*Normalized);
     CacheEntry =
         NormalizationCache.try_emplace(ConstrainedDeclOrNestedReq, Normalized)
             .first;
-    if (!Normalized ||
-        SubstituteParameterMappings(*this).substitute(*Normalized))
+    if (Failed)
       return nullptr;
   }
   return CacheEntry->second;
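Closing the loop with the avx512vlbwintrin.h hunk above: once `_mm_test_epi8_mask` and friends carry `__DEFAULT_FN_ATTRS128_CONSTEXPR`, and the underlying cmp-mask builtins constant-evaluate (the Interp/ExprConstant changes at the top of this commit), the mask tests work in static_asserts. My sketch, assuming `-mavx512bw -mavx512vl` and constexpr `_mm_and_si128`/`_mm_setzero_si128` (already the case in recent clang):

```cpp
#include <immintrin.h>

constexpr __m128i Ones   = (__m128i)(__v16qi){1, 1, 1, 1, 1, 1, 1, 1,
                                              1, 1, 1, 1, 1, 1, 1, 1};
constexpr __m128i Twos   = (__m128i)(__v16qi){2, 2, 2, 2, 2, 2, 2, 2,
                                              2, 2, 2, 2, 2, 2, 2, 2};
constexpr __m128i Threes = (__m128i)(__v16qi){3, 3, 3, 3, 3, 3, 3, 3,
                                              3, 3, 3, 3, 3, 3, 3, 3};

// test: bit set where (a & b) != 0; testn: bit set where (a & b) == 0.
static_assert(_mm_test_epi8_mask(Ones, Threes) == 0xFFFF, "1 & 3 != 0");
static_assert(_mm_testn_epi8_mask(Ones, Twos) == 0xFFFF, "1 & 2 == 0");
```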
