Diffstat (limited to 'clang/lib/CodeGen/TargetBuiltins')
-rw-r--r-- | clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp | 29
-rw-r--r-- | clang/lib/CodeGen/TargetBuiltins/ARM.cpp    | 22
-rw-r--r-- | clang/lib/CodeGen/TargetBuiltins/PPC.cpp    |  2
3 files changed, 27 insertions, 26 deletions
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index 5049a0a..f49a5af 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -12,6 +12,7 @@
 #include "CGBuiltin.h"
 #include "CodeGenFunction.h"
+#include "clang/Basic/SyncScope.h"
 #include "clang/Basic/TargetBuiltins.h"
 #include "clang/Frontend/FrontendDiagnostic.h"
 #include "llvm/Analysis/ValueTracking.h"
@@ -313,33 +314,33 @@ void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
   }
 
   // Older builtins had an enum argument for the memory scope.
+  const char *SSN = nullptr;
   int scope = cast<llvm::ConstantInt>(Scope)->getZExtValue();
   switch (scope) {
-  case 0: // __MEMORY_SCOPE_SYSTEM
+  case AtomicScopeGenericModel::System: // __MEMORY_SCOPE_SYSTEM
     SSID = llvm::SyncScope::System;
     break;
-  case 1: // __MEMORY_SCOPE_DEVICE
-    if (getTarget().getTriple().isSPIRV())
-      SSID = getLLVMContext().getOrInsertSyncScopeID("device");
-    else
-      SSID = getLLVMContext().getOrInsertSyncScopeID("agent");
+  case AtomicScopeGenericModel::Device: // __MEMORY_SCOPE_DEVICE
+    SSN = getTarget().getTriple().isSPIRV() ? "device" : "agent";
     break;
-  case 2: // __MEMORY_SCOPE_WRKGRP
-    SSID = getLLVMContext().getOrInsertSyncScopeID("workgroup");
+  case AtomicScopeGenericModel::Workgroup: // __MEMORY_SCOPE_WRKGRP
+    SSN = "workgroup";
     break;
-  case 3: // __MEMORY_SCOPE_WVFRNT
-    if (getTarget().getTriple().isSPIRV())
-      SSID = getLLVMContext().getOrInsertSyncScopeID("subgroup");
-    else
-      SSID = getLLVMContext().getOrInsertSyncScopeID("wavefront");
+  case AtomicScopeGenericModel::Cluster: // __MEMORY_SCOPE_CLUSTR
+    SSN = getTarget().getTriple().isSPIRV() ? "workgroup" : "cluster";
+    break;
+  case AtomicScopeGenericModel::Wavefront: // __MEMORY_SCOPE_WVFRNT
+    SSN = getTarget().getTriple().isSPIRV() ? "subgroup" : "wavefront";
     break;
-  case 4: // __MEMORY_SCOPE_SINGLE
+  case AtomicScopeGenericModel::Single: // __MEMORY_SCOPE_SINGLE
     SSID = llvm::SyncScope::SingleThread;
     break;
   default:
     SSID = llvm::SyncScope::System;
     break;
   }
+  if (SSN)
+    SSID = getLLVMContext().getOrInsertSyncScopeID(SSN);
 }
 
 llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
diff --git a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
index 2429a43..60f9b86 100644
--- a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
@@ -2037,7 +2037,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
   case NEON::BI__builtin_neon_vld1q_x3_v:
   case NEON::BI__builtin_neon_vld1_x4_v:
   case NEON::BI__builtin_neon_vld1q_x4_v: {
-    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+    llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
     Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
@@ -2263,11 +2263,11 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
     // in AArch64 it comes last. We may want to stick to one or another.
     if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
         Arch == llvm::Triple::aarch64_32) {
-      llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+      llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
       std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
       return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
     }
-    llvm::Type *Tys[2] = {UnqualPtrTy, VTy};
+    llvm::Type *Tys[2] = {DefaultPtrTy, VTy};
     return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
   }
   case NEON::BI__builtin_neon_vsubhn_v: {
@@ -2858,7 +2858,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
     Function *F = CGM.getIntrinsic(
         BuiltinID == clang::ARM::BI__builtin_arm_ldaex ? Intrinsic::arm_ldaex
                                                        : Intrinsic::arm_ldrex,
-        UnqualPtrTy);
+        DefaultPtrTy);
     CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
     Val->addParamAttr(
         0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
@@ -5225,7 +5225,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
         CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
                              ? Intrinsic::aarch64_ldaxr
                              : Intrinsic::aarch64_ldxr,
-                         UnqualPtrTy);
+                         DefaultPtrTy);
     CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
     Val->addParamAttr(
         0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
@@ -7482,42 +7482,42 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
   }
   case NEON::BI__builtin_neon_vld2_v:
   case NEON::BI__builtin_neon_vld2q_v: {
-    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+    llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
   }
   case NEON::BI__builtin_neon_vld3_v:
   case NEON::BI__builtin_neon_vld3q_v: {
-    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+    llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
   }
   case NEON::BI__builtin_neon_vld4_v:
   case NEON::BI__builtin_neon_vld4q_v: {
-    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+    llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
   }
   case NEON::BI__builtin_neon_vld2_dup_v:
   case NEON::BI__builtin_neon_vld2q_dup_v: {
-    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+    llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
   }
   case NEON::BI__builtin_neon_vld3_dup_v:
   case NEON::BI__builtin_neon_vld3q_dup_v: {
-    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+    llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
   }
   case NEON::BI__builtin_neon_vld4_dup_v:
   case NEON::BI__builtin_neon_vld4q_dup_v: {
-    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+    llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
diff --git a/clang/lib/CodeGen/TargetBuiltins/PPC.cpp b/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
index e71dc9e..44d5938 100644
--- a/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
@@ -59,7 +59,7 @@ static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
     Constraints += MachineClobbers;
   }
 
-  llvm::Type *PtrType = CGF.UnqualPtrTy;
+  llvm::Type *PtrType = CGF.DefaultPtrTy;
   llvm::FunctionType *FTy = llvm::FunctionType::get(RetType, {PtrType}, false);
 
   llvm::InlineAsm *IA =
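
Note on the AMDGPU hunk: the rewritten switch decodes the scope argument of Clang's generic scoped-atomic builtins, replacing bare integers with the AtomicScopeGenericModel enumerators and funneling all named scopes through one getOrInsertSyncScopeID call; the new Cluster case is the only functional addition. A minimal sketch of the kind of user code this services, assuming Clang's __scoped_atomic_* builtins and __MEMORY_SCOPE_* macros (neither appears in this diff):

// Hypothetical example, not part of the patch. Per the switch above,
// __MEMORY_SCOPE_DEVICE is expected to lower to an atomicrmw with
// syncscope("agent") on AMDGCN and syncscope("device") on SPIR-V.
int add_device_scope(int *p) {
  return __scoped_atomic_fetch_add(p, 1, __ATOMIC_RELAXED,
                                   __MEMORY_SCOPE_DEVICE);
}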
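For the ARM hunks, the change is mechanical: the pointer type used to instantiate the load/store intrinsics switches from UnqualPtrTy to DefaultPtrTy. A sketch of code that reaches the vld2 path, assuming <arm_neon.h> (the wrapper name is illustrative):

#include <arm_neon.h>

// Hypothetical example: vld2q_f32 expands to __builtin_neon_vld2q_v, which
// EmitAArch64BuiltinExpr lowers to llvm.aarch64.neon.ld2; the intrinsic's
// pointer operand type is what this patch changes.
float32x4x2_t load_interleaved(const float *p) {
  return vld2q_f32(p);
}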
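The PPC hunk makes the same substitution for the pointer operand of the inline asm built by emitPPCLoadReserveIntrinsic. A sketch of code routed through that helper, assuming the XL-compatible __builtin_ppc_lwarx builtin (not shown in this diff):

// Hypothetical example: load-reserve via the lwarx instruction; the inline
// asm emitted by emitPPCLoadReserveIntrinsic now types its pointer operand
// with DefaultPtrTy.
int load_reserve(volatile int *p) {
  return __builtin_ppc_lwarx(p);
}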