Diffstat (limited to 'clang/lib/CodeGen')
-rw-r--r-- | clang/lib/CodeGen/CGAtomic.cpp           | 12
-rw-r--r-- | clang/lib/CodeGen/CGOpenMPRuntime.cpp    |  5
-rw-r--r-- | clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp | 11
-rw-r--r-- | clang/lib/CodeGen/TargetBuiltins/ARM.cpp | 24
-rw-r--r-- | clang/lib/CodeGen/Targets/RISCV.cpp      | 24
-rw-r--r-- | clang/lib/CodeGen/Targets/SystemZ.cpp    |  4
6 files changed, 48 insertions, 32 deletions
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index 4a3446a..d95dab3 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -507,6 +507,18 @@ static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
                                          bool IsSigned,
                                          llvm::Value *OldVal,
                                          llvm::Value *RHS) {
+  const bool IsFP = OldVal->getType()->isFloatingPointTy();
+
+  if (IsFP) {
+    llvm::Intrinsic::ID IID = (Op == AtomicExpr::AO__atomic_max_fetch ||
+                               Op == AtomicExpr::AO__scoped_atomic_max_fetch)
+                                  ? llvm::Intrinsic::maxnum
+                                  : llvm::Intrinsic::minnum;
+
+    return Builder.CreateBinaryIntrinsic(IID, OldVal, RHS, llvm::FMFSource(),
+                                         "newval");
+  }
+
   llvm::CmpInst::Predicate Pred;
   switch (Op) {
   default:
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index fa94692..1ff2be7 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -1762,8 +1762,11 @@ void CGOpenMPRuntime::emitDeclareTargetFunction(const FunctionDecl *FD,
   // access its value.
   llvm::GlobalValue *Addr = GV;
   if (CGM.getLangOpts().OpenMPIsTargetDevice) {
+    llvm::PointerType *FnPtrTy = llvm::PointerType::get(
+        CGM.getLLVMContext(),
+        CGM.getModule().getDataLayout().getProgramAddressSpace());
     Addr = new llvm::GlobalVariable(
-        CGM.getModule(), CGM.VoidPtrTy,
+        CGM.getModule(), FnPtrTy,
         /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, GV, Name,
         nullptr, llvm::GlobalValue::NotThreadLocal,
         CGM.getModule().getDataLayout().getDefaultGlobalsAddressSpace());
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 3613b6a..fddeba9 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -7,7 +7,7 @@
 //===----------------------------------------------------------------------===//
 //
 // This provides a generalized class for OpenMP runtime code generation
-// specialized by GPU targets NVPTX and AMDGCN.
+// specialized by GPU targets NVPTX, AMDGCN and SPIR-V.
 //
 //===----------------------------------------------------------------------===//
@@ -1242,12 +1242,13 @@ void CGOpenMPRuntimeGPU::emitParallelCall(
   CGBuilderTy &Bld = CGF.Builder;
   llvm::Value *NumThreadsVal = NumThreads;
   llvm::Function *WFn = WrapperFunctionsMap[OutlinedFn];
-  llvm::Value *ID = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
-  if (WFn)
-    ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
-  llvm::Type *FnPtrTy = llvm::PointerType::get(
+  llvm::PointerType *FnPtrTy = llvm::PointerType::get(
       CGF.getLLVMContext(), CGM.getDataLayout().getProgramAddressSpace());
+  llvm::Value *ID = llvm::ConstantPointerNull::get(FnPtrTy);
+  if (WFn)
+    ID = Bld.CreateBitOrPointerCast(WFn, FnPtrTy);
+
   llvm::Value *FnPtr = Bld.CreateBitOrPointerCast(OutlinedFn, FnPtrTy);
 
   // Create a private scope that will globalize the arguments
diff --git a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
index 82b71e3..2429a43 100644
--- a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
@@ -7795,7 +7795,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
   }
   case NEON::BI__builtin_neon_vcvt1_low_bf16_mf8_fpm:
     ExtractLow = true;
-    LLVM_FALLTHROUGH;
+    [[fallthrough]];
   case NEON::BI__builtin_neon_vcvt1_bf16_mf8_fpm:
   case NEON::BI__builtin_neon_vcvt1_high_bf16_mf8_fpm:
     return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_cvtl1,
@@ -7803,7 +7803,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                               Ops[0]->getType(), ExtractLow, Ops, E, "vbfcvt1");
   case NEON::BI__builtin_neon_vcvt2_low_bf16_mf8_fpm:
     ExtractLow = true;
-    LLVM_FALLTHROUGH;
+    [[fallthrough]];
   case NEON::BI__builtin_neon_vcvt2_bf16_mf8_fpm:
   case NEON::BI__builtin_neon_vcvt2_high_bf16_mf8_fpm:
     return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_cvtl2,
@@ -7811,7 +7811,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                               Ops[0]->getType(), ExtractLow, Ops, E, "vbfcvt2");
   case NEON::BI__builtin_neon_vcvt1_low_f16_mf8_fpm:
     ExtractLow = true;
-    LLVM_FALLTHROUGH;
+    [[fallthrough]];
   case NEON::BI__builtin_neon_vcvt1_f16_mf8_fpm:
   case NEON::BI__builtin_neon_vcvt1_high_f16_mf8_fpm:
     return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_cvtl1,
@@ -7819,7 +7819,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                               Ops[0]->getType(), ExtractLow, Ops, E, "vbfcvt1");
   case NEON::BI__builtin_neon_vcvt2_low_f16_mf8_fpm:
     ExtractLow = true;
-    LLVM_FALLTHROUGH;
+    [[fallthrough]];
   case NEON::BI__builtin_neon_vcvt2_f16_mf8_fpm:
   case NEON::BI__builtin_neon_vcvt2_high_f16_mf8_fpm:
     return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_cvtl2,
@@ -7854,7 +7854,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
   case NEON::BI__builtin_neon_vdot_lane_f16_mf8_fpm:
   case NEON::BI__builtin_neon_vdotq_lane_f16_mf8_fpm:
     ExtendLaneArg = true;
-    LLVM_FALLTHROUGH;
+    [[fallthrough]];
   case NEON::BI__builtin_neon_vdot_laneq_f16_mf8_fpm:
   case NEON::BI__builtin_neon_vdotq_laneq_f16_mf8_fpm:
     return EmitFP8NeonFDOTCall(Intrinsic::aarch64_neon_fp8_fdot2_lane,
@@ -7866,7 +7866,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
   case NEON::BI__builtin_neon_vdot_lane_f32_mf8_fpm:
   case NEON::BI__builtin_neon_vdotq_lane_f32_mf8_fpm:
     ExtendLaneArg = true;
-    LLVM_FALLTHROUGH;
+    [[fallthrough]];
   case NEON::BI__builtin_neon_vdot_laneq_f32_mf8_fpm:
   case NEON::BI__builtin_neon_vdotq_laneq_f32_mf8_fpm:
     return EmitFP8NeonFDOTCall(Intrinsic::aarch64_neon_fp8_fdot4_lane,
@@ -7898,37 +7898,37 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                                "vmlall");
   case NEON::BI__builtin_neon_vmlalbq_lane_f16_mf8_fpm:
     ExtendLaneArg = true;
-    LLVM_FALLTHROUGH;
+    [[fallthrough]];
   case NEON::BI__builtin_neon_vmlalbq_laneq_f16_mf8_fpm:
     return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlalb_lane,
                                ExtendLaneArg, HalfTy, Ops, E, "vmlal_lane");
   case NEON::BI__builtin_neon_vmlaltq_lane_f16_mf8_fpm:
     ExtendLaneArg = true;
-    LLVM_FALLTHROUGH;
+    [[fallthrough]];
   case NEON::BI__builtin_neon_vmlaltq_laneq_f16_mf8_fpm:
     return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlalt_lane,
                                ExtendLaneArg, HalfTy, Ops, E, "vmlal_lane");
   case NEON::BI__builtin_neon_vmlallbbq_lane_f32_mf8_fpm:
     ExtendLaneArg = true;
-    LLVM_FALLTHROUGH;
+    [[fallthrough]];
   case NEON::BI__builtin_neon_vmlallbbq_laneq_f32_mf8_fpm:
     return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlallbb_lane,
                                ExtendLaneArg, FloatTy, Ops, E, "vmlall_lane");
   case NEON::BI__builtin_neon_vmlallbtq_lane_f32_mf8_fpm:
     ExtendLaneArg = true;
-    LLVM_FALLTHROUGH;
+    [[fallthrough]];
   case NEON::BI__builtin_neon_vmlallbtq_laneq_f32_mf8_fpm:
     return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlallbt_lane,
                                ExtendLaneArg, FloatTy, Ops, E, "vmlall_lane");
   case NEON::BI__builtin_neon_vmlalltbq_lane_f32_mf8_fpm:
     ExtendLaneArg = true;
-    LLVM_FALLTHROUGH;
+    [[fallthrough]];
   case NEON::BI__builtin_neon_vmlalltbq_laneq_f32_mf8_fpm:
     return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlalltb_lane,
                                ExtendLaneArg, FloatTy, Ops, E, "vmlall_lane");
   case NEON::BI__builtin_neon_vmlallttq_lane_f32_mf8_fpm:
     ExtendLaneArg = true;
-    LLVM_FALLTHROUGH;
+    [[fallthrough]];
   case NEON::BI__builtin_neon_vmlallttq_laneq_f32_mf8_fpm:
     return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlalltt_lane,
                                ExtendLaneArg, FloatTy, Ops, E, "vmlall_lane");
diff --git a/clang/lib/CodeGen/Targets/RISCV.cpp b/clang/lib/CodeGen/Targets/RISCV.cpp
index 0ef39b6..0d0941e 100644
--- a/clang/lib/CodeGen/Targets/RISCV.cpp
+++ b/clang/lib/CodeGen/Targets/RISCV.cpp
@@ -680,22 +680,22 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
   if (const auto *ED = Ty->getAsEnumDecl())
     Ty = ED->getIntegerType();
 
-  // All integral types are promoted to XLen width
-  if (Size < XLen && Ty->isIntegralOrEnumerationType()) {
-    return extendType(Ty, CGT.ConvertType(Ty));
-  }
-
   if (const auto *EIT = Ty->getAs<BitIntType>()) {
-    if (EIT->getNumBits() < XLen)
+
+    if (XLen == 64 && EIT->getNumBits() == 32)
       return extendType(Ty, CGT.ConvertType(Ty));
-    if (EIT->getNumBits() > 128 ||
-        (!getContext().getTargetInfo().hasInt128Type() &&
-         EIT->getNumBits() > 64))
-      return getNaturalAlignIndirect(
-          Ty, /*AddrSpace=*/getDataLayout().getAllocaAddrSpace(),
-          /*ByVal=*/false);
+
+    if (EIT->getNumBits() <= 2 * XLen)
+      return ABIArgInfo::getExtend(Ty, CGT.ConvertType(Ty));
+    return getNaturalAlignIndirect(
+        Ty, /*AddrSpace=*/getDataLayout().getAllocaAddrSpace(),
+        /*ByVal=*/false);
   }
 
+  // All integral types are promoted to XLen width
+  if (Size < XLen && Ty->isIntegralOrEnumerationType())
+    return extendType(Ty, CGT.ConvertType(Ty));
+
   return ABIArgInfo::getDirect();
 }
diff --git a/clang/lib/CodeGen/Targets/SystemZ.cpp b/clang/lib/CodeGen/Targets/SystemZ.cpp
index 9b6b72b1..e50f06c 100644
--- a/clang/lib/CodeGen/Targets/SystemZ.cpp
+++ b/clang/lib/CodeGen/Targets/SystemZ.cpp
@@ -193,11 +193,11 @@ llvm::Type *SystemZABIInfo::getFPArgumentType(QualType Ty,
   case BuiltinType::Float16:
     if (Size == 16)
       return llvm::Type::getHalfTy(getVMContext());
-    LLVM_FALLTHROUGH;
+    [[fallthrough]];
   case BuiltinType::Float:
     if (Size == 32)
       return llvm::Type::getFloatTy(getVMContext());
-    LLVM_FALLTHROUGH;
+    [[fallthrough]];
   case BuiltinType::Double:
     return llvm::Type::getDoubleTy(getVMContext());
   default:
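
Usage sketch (not part of the diff): the CGAtomic.cpp hunk teaches EmitPostAtomicMinMax to recompute the result of the *_max_fetch/*_min_fetch forms with llvm.maxnum/llvm.minnum when the operand is floating point, instead of falling into the integer compare-and-select path. Assuming the matching Sema support for floating-point operands to these builtins, code like the following would exercise the new path:

// Hypothetical example; returns the new (maximum) value stored at *p.
// With this change the returned value is derived from the atomicrmw
// result via llvm.maxnum rather than an integer compare-and-select.
float atomic_fmax(float *p, float v) {
  return __atomic_max_fetch(p, v, __ATOMIC_SEQ_CST);
}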