Diffstat (limited to 'clang')
57 files changed, 3635 insertions, 1149 deletions
diff --git a/clang/include/clang/Basic/BuiltinsNVPTX.td b/clang/include/clang/Basic/BuiltinsNVPTX.td
index 2d6fa17..d923d2a 100644
--- a/clang/include/clang/Basic/BuiltinsNVPTX.td
+++ b/clang/include/clang/Basic/BuiltinsNVPTX.td
@@ -579,11 +579,35 @@ def __nvvm_ff2bf16x2_rn : NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(float, float)
 def __nvvm_ff2bf16x2_rn_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(float, float)", SM_80, PTX70>;
 def __nvvm_ff2bf16x2_rz : NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(float, float)", SM_80, PTX70>;
 def __nvvm_ff2bf16x2_rz_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(float, float)", SM_80, PTX70>;
+def __nvvm_ff2bf16x2_rs :
+    NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(float, float, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
+def __nvvm_ff2bf16x2_rs_relu :
+    NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(float, float, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
+def __nvvm_ff2bf16x2_rs_satfinite :
+    NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(float, float, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
+def __nvvm_ff2bf16x2_rs_relu_satfinite :
+    NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(float, float, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
 def __nvvm_ff2f16x2_rn : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(float, float)", SM_80, PTX70>;
 def __nvvm_ff2f16x2_rn_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(float, float)", SM_80, PTX70>;
 def __nvvm_ff2f16x2_rz : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(float, float)", SM_80, PTX70>;
 def __nvvm_ff2f16x2_rz_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(float, float)", SM_80, PTX70>;
+def __nvvm_ff2f16x2_rs :
+    NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(float, float, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
+def __nvvm_ff2f16x2_rs_relu :
+    NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(float, float, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
+def __nvvm_ff2f16x2_rs_satfinite :
+    NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(float, float, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
+def __nvvm_ff2f16x2_rs_relu_satfinite :
+    NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(float, float, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
 
 def __nvvm_f2bf16_rn : NVPTXBuiltinSMAndPTX<"__bf16(float)", SM_80, PTX70>;
 def __nvvm_f2bf16_rn_relu : NVPTXBuiltinSMAndPTX<"__bf16(float)", SM_80, PTX70>;
@@ -616,6 +640,19 @@ def __nvvm_e4m3x2_to_f16x2_rn_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(sh
 def __nvvm_e5m2x2_to_f16x2_rn : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(short)", SM_89, PTX81>;
 def __nvvm_e5m2x2_to_f16x2_rn_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(short)", SM_89, PTX81>;
 
+def __nvvm_f32x4_to_e4m3x4_rs_satfinite :
+    NVPTXBuiltinSMAndPTX<"_Vector<4, char>(_Vector<4, float>, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
+def __nvvm_f32x4_to_e4m3x4_rs_relu_satfinite :
+    NVPTXBuiltinSMAndPTX<"_Vector<4, char>(_Vector<4, float>, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
+def __nvvm_f32x4_to_e5m2x4_rs_satfinite :
+    NVPTXBuiltinSMAndPTX<"_Vector<4, char>(_Vector<4, float>, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
+def __nvvm_f32x4_to_e5m2x4_rs_relu_satfinite :
+    NVPTXBuiltinSMAndPTX<"_Vector<4, char>(_Vector<4, float>, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
+
 def __nvvm_ff_to_e2m3x2_rn_satfinite : NVPTXBuiltinSMAndPTX<"short(float, float)", SM<"100a", [SM_101a, SM_120a]>, PTX86>;
 def __nvvm_ff_to_e2m3x2_rn_relu_satfinite : NVPTXBuiltinSMAndPTX<"short(float, float)", SM<"100a", [SM_101a, SM_120a]>, PTX86>;
 def __nvvm_ff_to_e3m2x2_rn_satfinite : NVPTXBuiltinSMAndPTX<"short(float, float)", SM<"100a", [SM_101a, SM_120a]>, PTX86>;
@@ -626,12 +663,32 @@ def __nvvm_e2m3x2_to_f16x2_rn_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(sh
 def __nvvm_e3m2x2_to_f16x2_rn : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(short)", SM<"100a", [SM_101a, SM_120a]>, PTX86>;
 def __nvvm_e3m2x2_to_f16x2_rn_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(short)", SM<"100a", [SM_101a, SM_120a]>, PTX86>;
 
+def __nvvm_f32x4_to_e2m3x4_rs_satfinite :
+    NVPTXBuiltinSMAndPTX<"_Vector<4, char>(_Vector<4, float>, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
+def __nvvm_f32x4_to_e2m3x4_rs_relu_satfinite :
+    NVPTXBuiltinSMAndPTX<"_Vector<4, char>(_Vector<4, float>, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
+def __nvvm_f32x4_to_e3m2x4_rs_satfinite :
+    NVPTXBuiltinSMAndPTX<"_Vector<4, char>(_Vector<4, float>, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
+def __nvvm_f32x4_to_e3m2x4_rs_relu_satfinite :
+    NVPTXBuiltinSMAndPTX<"_Vector<4, char>(_Vector<4, float>, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
+
 def __nvvm_ff_to_e2m1x2_rn_satfinite : NVPTXBuiltinSMAndPTX<"short(float, float)", SM<"100a", [SM_101a, SM_120a]>, PTX86>;
 def __nvvm_ff_to_e2m1x2_rn_relu_satfinite : NVPTXBuiltinSMAndPTX<"short(float, float)", SM<"100a", [SM_101a, SM_120a]>, PTX86>;
 
 def __nvvm_e2m1x2_to_f16x2_rn : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(short)", SM<"100a", [SM_101a, SM_120a]>, PTX86>;
 def __nvvm_e2m1x2_to_f16x2_rn_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(short)", SM<"100a", [SM_101a, SM_120a]>, PTX86>;
 
+def __nvvm_f32x4_to_e2m1x4_rs_satfinite :
+    NVPTXBuiltinSMAndPTX<"short(_Vector<4, float>, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
+def __nvvm_f32x4_to_e2m1x4_rs_relu_satfinite :
+    NVPTXBuiltinSMAndPTX<"short(_Vector<4, float>, uint32_t)",
+                         SM<"100a", [SM_103a]>, PTX87>;
+
 def __nvvm_ff_to_ue8m0x2_rz : NVPTXBuiltinSMAndPTX<"short(float, float)", SM<"100a", [SM_101a, SM_120a]>, PTX86>;
 def __nvvm_ff_to_ue8m0x2_rz_satfinite : NVPTXBuiltinSMAndPTX<"short(float, float)", SM<"100a", [SM_101a, SM_120a]>, PTX86>;
 def __nvvm_ff_to_ue8m0x2_rp : NVPTXBuiltinSMAndPTX<"short(float, float)", SM<"100a", [SM_101a, SM_120a]>, PTX86>;
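For context: the new "_rs" builtins map to PTX's stochastic-rounding conversions, where the extra uint32_t operand supplies the random bits that decide how the value is rounded. A minimal sketch of device-side usage, assuming CUDA compilation for sm_100a or sm_103a with PTX 8.7 (the helper name and the origin of the random bits are illustrative assumptions, not part of the change):

    // Illustrative only: pack two floats into a bf16x2 pair with
    // stochastic rounding driven by caller-provided random bits.
    typedef __bf16 bf16x2 __attribute__((ext_vector_type(2)));

    __device__ bf16x2 pack_bf16x2_stochastic(float a, float b,
                                             unsigned random_bits) {
      // The third operand feeds the entropy consumed by the .rs rounding mode.
      return __nvvm_ff2bf16x2_rs(a, b, random_bits);
    }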
diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td
index a0181b7..4165225 100644
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -2409,28 +2409,36 @@ let Features = "avx512f", Attributes = [NoThrow, Const, RequiredVectorWidth<512>
   def psraq512 : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, _Vector<2, long long int>)">;
   def psrld512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<4, int>)">;
   def psrlq512 : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, _Vector<2, long long int>)">;
+}
+
+let Features = "avx512f",
+    Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
   def pternlogd512_mask : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<16, int>, _Vector<16, int>, _Constant int, unsigned short)">;
   def pternlogd512_maskz : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<16, int>, _Vector<16, int>, _Constant int, unsigned short)">;
   def pternlogq512_mask : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, _Vector<8, long long int>, _Vector<8, long long int>, _Constant int, unsigned char)">;
   def pternlogq512_maskz : X86Builtin<"_Vector<8, long long int>(_Vector<8, long long int>, _Vector<8, long long int>, _Vector<8, long long int>, _Constant int, unsigned char)">;
 }
 
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl",
+    Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
   def pternlogd128_mask : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>, _Vector<4, int>, _Constant int, unsigned char)">;
   def pternlogd128_maskz : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>, _Vector<4, int>, _Constant int, unsigned char)">;
 }
 
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl",
+    Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
   def pternlogd256_mask : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>, _Vector<8, int>, _Constant int, unsigned char)">;
   def pternlogd256_maskz : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>, _Vector<8, int>, _Constant int, unsigned char)">;
 }
 
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl",
+    Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
   def pternlogq128_mask : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>, _Vector<2, long long int>, _Constant int, unsigned char)">;
   def pternlogq128_maskz : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>, _Vector<2, long long int>, _Constant int, unsigned char)">;
 }
 
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl",
+    Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
   def pternlogq256_mask : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<4, long long int>, _Vector<4, long long int>, _Constant int, unsigned char)">;
   def pternlogq256_maskz : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<4, long long int>, _Vector<4, long long int>, _Constant int, unsigned char)">;
 }
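The Constexpr attribute added above is what lets these ternary-logic builtins participate in constant evaluation (the interpreter changes appear further down, in InterpBuiltin.cpp and ExprConstant.cpp). As a sketch of the semantics: each result bit is looked up in the 8-bit immediate at index (A<<2)|(B<<1)|C, so imm 0xCA is the bitwise select "A ? B : C". Assuming a target with avx512vl, something like this should now fold at compile time (my example, not from the patch):

    typedef int v4si __attribute__((vector_size(16)));

    constexpr v4si a{-1, -1, 0, 0}; // all-ones lanes pick b, zero lanes pick c
    constexpr v4si b{1, 2, 3, 4};
    constexpr v4si c{5, 6, 7, 8};
    // imm 0xCA encodes "a ? b : c"; lane mask 0x0F enables all four lanes.
    constexpr v4si r = __builtin_ia32_pternlogd128_mask(a, b, c, 0xCA, 0x0F);
    static_assert(r[0] == 1 && r[1] == 2 && r[2] == 7 && r[3] == 8);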
diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def
index 84f5ab3..9e85008 100644
--- a/clang/include/clang/Basic/LangOptions.def
+++ b/clang/include/clang/Basic/LangOptions.def
@@ -245,7 +245,6 @@ LANGOPT(HLSLStrictAvailability, 1, 0, NotCompatible,
 LANGOPT(HLSLSpvUseUnknownImageFormat, 1, 0, NotCompatible,
         "For storage images and texel buffers, sets the default format to 'Unknown' when not specified via the `vk::image_format` attribute. If this option is not used, the format is inferred from the resource's data type.")
 
 LANGOPT(CUDAIsDevice , 1, 0, NotCompatible, "compiling for CUDA device")
-LANGOPT(CUDAAllowVariadicFunctions, 1, 0, NotCompatible, "allowing variadic functions in CUDA device code")
 LANGOPT(CUDAHostDeviceConstexpr, 1, 1, NotCompatible, "treating unattributed constexpr functions as __host__ __device__")
 LANGOPT(GPUDeviceApproxTranscendentals, 1, 0, NotCompatible, "using approximate transcendental functions")
 LANGOPT(GPURelocatableDeviceCode, 1, 0, NotCompatible, "generate relocatable device code")
diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index 569491a..89b519e 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -125,6 +125,7 @@ public:
   cir::ConstantOp getTrue(mlir::Location loc) { return getBool(true, loc); }
 
   cir::BoolType getBoolTy() { return cir::BoolType::get(getContext()); }
+  cir::VoidType getVoidTy() { return cir::VoidType::get(getContext()); }
 
   cir::PointerType getPointerTo(mlir::Type ty) {
     return cir::PointerType::get(ty);
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index f7ca276..f795800 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -37,6 +37,11 @@ struct MissingFeatures {
   static bool opGlobalDLLImportExport() { return false; }
   static bool opGlobalPartition() { return false; }
   static bool opGlobalUsedOrCompilerUsed() { return false; }
+  static bool opGlobalAnnotations() { return false; }
+  static bool opGlobalDtorLowering() { return false; }
+  static bool opGlobalCtorAttr() { return false; }
+  static bool opGlobalCtorPriority() { return false; }
+  static bool opGlobalCtorList() { return false; }
 
   static bool setDSOLocal() { return false; }
   static bool setComdat() { return false; }
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 5a48f0b..9bfa1dd 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -8733,8 +8733,7 @@ def fcuda_include_gpubinary : Separate<["-"], "fcuda-include-gpubinary">,
   HelpText<"Incorporate CUDA device-side binary into host object file.">,
   MarshallingInfoString<CodeGenOpts<"CudaGpuBinaryFileName">>;
 def fcuda_allow_variadic_functions : Flag<["-"], "fcuda-allow-variadic-functions">,
-  HelpText<"Allow variadic functions in CUDA device code.">,
-  MarshallingInfoFlag<LangOpts<"CUDAAllowVariadicFunctions">>;
+  HelpText<"Deprecated; Allow variadic functions in CUDA device code.">;
def fno_cuda_host_device_constexpr : Flag<["-"], "fno-cuda-host-device-constexpr">,
   HelpText<"Don't treat unattributed constexpr functions as __host__ __device__.">,
   MarshallingInfoNegativeFlag<LangOpts<"CUDAHostDeviceConstexpr">>;
diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp
index 21af3d6..8904396 100644
--- a/clang/lib/AST/ByteCode/Interp.cpp
+++ b/clang/lib/AST/ByteCode/Interp.cpp
@@ -1638,6 +1638,36 @@ bool Call(InterpState &S, CodePtr OpPC, const Function *Func,
   return true;
 }
 
+static bool GetDynamicDecl(InterpState &S, CodePtr OpPC, Pointer TypePtr,
+                           const CXXRecordDecl *&DynamicDecl) {
+  while (TypePtr.isBaseClass())
+    TypePtr = TypePtr.getBase();
+
+  QualType DynamicType = TypePtr.getType();
+  if (TypePtr.isStatic() || TypePtr.isConst()) {
+    const VarDecl *VD = TypePtr.getDeclDesc()->asVarDecl();
+    if (!VD->isConstexpr()) {
+      const Expr *E = S.Current->getExpr(OpPC);
+      APValue V = TypePtr.toAPValue(S.getASTContext());
+      QualType TT = S.getASTContext().getLValueReferenceType(DynamicType);
+      S.FFDiag(E, diag::note_constexpr_polymorphic_unknown_dynamic_type)
+          << AccessKinds::AK_MemberCall << V.getAsString(S.getASTContext(), TT);
+      return false;
+    }
+  }
+
+  if (DynamicType->isPointerType() || DynamicType->isReferenceType()) {
+    DynamicDecl = DynamicType->getPointeeCXXRecordDecl();
+  } else if (DynamicType->isArrayType()) {
+    const Type *ElemType = DynamicType->getPointeeOrArrayElementType();
+    assert(ElemType);
+    DynamicDecl = ElemType->getAsCXXRecordDecl();
+  } else {
+    DynamicDecl = DynamicType->getAsCXXRecordDecl();
+  }
+  return true;
+}
+
 bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
               uint32_t VarArgSize) {
   assert(Func->hasThisPointer());
@@ -1662,17 +1692,8 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
   }
 
   const CXXRecordDecl *DynamicDecl = nullptr;
-  {
-    Pointer TypePtr = ThisPtr;
-    while (TypePtr.isBaseClass())
-      TypePtr = TypePtr.getBase();
-
-    QualType DynamicType = TypePtr.getType();
-    if (DynamicType->isPointerType() || DynamicType->isReferenceType())
-      DynamicDecl = DynamicType->getPointeeCXXRecordDecl();
-    else
-      DynamicDecl = DynamicType->getAsCXXRecordDecl();
-  }
+  if (!GetDynamicDecl(S, OpPC, ThisPtr, DynamicDecl))
+    return false;
   assert(DynamicDecl);
 
   const auto *StaticDecl = cast<CXXRecordDecl>(Func->getParentDecl());
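The extracted GetDynamicDecl also rejects virtual dispatch on objects whose dynamic type the bytecode evaluator cannot trust, mirroring the diagnostic the AST evaluator emits. Roughly the shape of code this affects (illustrative example, not from the patch):

    struct Base {
      virtual int f() const { return 1; }
    };

    Base g; // not constexpr, so its dynamic type is opaque during constant evaluation

    // Evaluating g.f() in a constant expression is now diagnosed with the
    // "polymorphic unknown dynamic type" note.
    constexpr int probe() { return g.f(); }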
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 68ebfdf..6af7ef3 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -736,25 +736,6 @@ static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
   return true;
 }
 
-/// rotateleft(value, amount)
-static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
-                                   const InterpFrame *Frame,
-                                   const CallExpr *Call, bool Right) {
-  APSInt Amount = popToAPSInt(S, Call->getArg(1));
-  APSInt Value = popToAPSInt(S, Call->getArg(0));
-
-  APSInt Result;
-  if (Right)
-    Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
-                    /*IsUnsigned=*/true);
-  else // Left.
-    Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
-                    /*IsUnsigned=*/true);
-
-  pushInteger(S, Result, Call->getType());
-  return true;
-}
-
 static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame,
                                 const CallExpr *Call) {
@@ -2916,7 +2897,49 @@ static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC,
   });
 
   Dst.initializeAllElements();
+  return true;
+}
+
+static bool interp__builtin_ia32_pternlog(InterpState &S, CodePtr OpPC,
+                                          const CallExpr *Call, bool MaskZ) {
+  assert(Call->getNumArgs() == 5);
+  APInt U = popToAPSInt(S, Call->getArg(4));   // Lane mask
+  APInt Imm = popToAPSInt(S, Call->getArg(3)); // Ternary truth table
+  const Pointer &C = S.Stk.pop<Pointer>();
+  const Pointer &B = S.Stk.pop<Pointer>();
+  const Pointer &A = S.Stk.pop<Pointer>();
+  const Pointer &Dst = S.Stk.peek<Pointer>();
+
+  unsigned DstLen = A.getNumElems();
+  const QualType ElemQT = getElemType(A);
+  const OptPrimType ElemPT = S.getContext().classify(ElemQT);
+  unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);
+  bool DstUnsigned = ElemQT->isUnsignedIntegerOrEnumerationType();
+
+  INT_TYPE_SWITCH_NO_BOOL(*ElemPT, {
+    for (unsigned I = 0; I != DstLen; ++I) {
+      APInt ALane = A.elem<T>(I).toAPSInt();
+      APInt BLane = B.elem<T>(I).toAPSInt();
+      APInt CLane = C.elem<T>(I).toAPSInt();
+      APInt RLane(LaneWidth, 0);
+      if (U[I]) { // If lane not masked, compute ternary logic.
+        for (unsigned Bit = 0; Bit != LaneWidth; ++Bit) {
+          unsigned ABit = ALane[Bit];
+          unsigned BBit = BLane[Bit];
+          unsigned CBit = CLane[Bit];
+          unsigned Idx = (ABit << 2) | (BBit << 1) | (CBit);
+          RLane.setBitVal(Bit, Imm[Idx]);
+        }
+        Dst.elem<T>(I) = static_cast<T>(APSInt(RLane, DstUnsigned));
+      } else if (MaskZ) { // If zero masked, zero the lane.
+        Dst.elem<T>(I) = static_cast<T>(APSInt(RLane, DstUnsigned));
+      } else { // Just masked, put in A lane.
+        Dst.elem<T>(I) = static_cast<T>(APSInt(ALane, DstUnsigned));
+      }
+    }
+  });
+
+  Dst.initializeAllElements();
   return true;
 }
 
@@ -3160,7 +3183,10 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
   case Builtin::BI_rotl:
   case Builtin::BI_lrotl:
   case Builtin::BI_rotl64:
-    return interp__builtin_rotate(S, OpPC, Frame, Call, /*Right=*/false);
+    return interp__builtin_elementwise_int_binop(
+        S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) -> APInt {
+          return Value.rotl(Amount);
+        });
 
   case Builtin::BI__builtin_rotateright8:
   case Builtin::BI__builtin_rotateright16:
@@ -3171,7 +3197,10 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
   case Builtin::BI_rotr:
   case Builtin::BI_lrotr:
   case Builtin::BI_rotr64:
-    return interp__builtin_rotate(S, OpPC, Frame, Call, /*Right=*/true);
+    return interp__builtin_elementwise_int_binop(
+        S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) -> APInt {
+          return Value.rotr(Amount);
+        });
 
   case Builtin::BI__builtin_ffs:
   case Builtin::BI__builtin_ffsl:
@@ -3773,6 +3802,20 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
         S, OpPC, Call,
         [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
 
+  case X86::BI__builtin_ia32_pternlogd128_mask:
+  case X86::BI__builtin_ia32_pternlogd256_mask:
+  case X86::BI__builtin_ia32_pternlogd512_mask:
+  case X86::BI__builtin_ia32_pternlogq128_mask:
+  case X86::BI__builtin_ia32_pternlogq256_mask:
+  case X86::BI__builtin_ia32_pternlogq512_mask:
+    return interp__builtin_ia32_pternlog(S, OpPC, Call, /*MaskZ=*/false);
+  case X86::BI__builtin_ia32_pternlogd128_maskz:
+  case X86::BI__builtin_ia32_pternlogd256_maskz:
+  case X86::BI__builtin_ia32_pternlogd512_maskz:
+  case X86::BI__builtin_ia32_pternlogq128_maskz:
+  case X86::BI__builtin_ia32_pternlogq256_maskz:
+  case X86::BI__builtin_ia32_pternlogq512_maskz:
+    return interp__builtin_ia32_pternlog(S, OpPC, Call, /*MaskZ=*/true);
   case Builtin::BI__builtin_elementwise_fshl:
     return interp__builtin_elementwise_triop(S, OpPC, Call,
                                              llvm::APIntOps::fshl);
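Behavior of the rotate builtins is unchanged by routing them through interp__builtin_elementwise_int_binop (APInt::rotl/rotr already reduce the amount modulo the bit width); quick checks that hold before and after (my examples):

    static_assert(__builtin_rotateleft8(0x81, 1) == 0x03, "");
    static_assert(__builtin_rotateright16(0x0001, 1) == 0x8000, "");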
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 7bf28d9..0b23eed 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -12168,6 +12168,97 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
     return Success(R, E);
   }
 
+  case X86::BI__builtin_ia32_pternlogd128_mask:
+  case X86::BI__builtin_ia32_pternlogd256_mask:
+  case X86::BI__builtin_ia32_pternlogd512_mask:
+  case X86::BI__builtin_ia32_pternlogq128_mask:
+  case X86::BI__builtin_ia32_pternlogq256_mask:
+  case X86::BI__builtin_ia32_pternlogq512_mask: {
+    APValue AValue, BValue, CValue, ImmValue, UValue;
+    if (!EvaluateAsRValue(Info, E->getArg(0), AValue) ||
+        !EvaluateAsRValue(Info, E->getArg(1), BValue) ||
+        !EvaluateAsRValue(Info, E->getArg(2), CValue) ||
+        !EvaluateAsRValue(Info, E->getArg(3), ImmValue) ||
+        !EvaluateAsRValue(Info, E->getArg(4), UValue))
+      return false;
+
+    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
+    bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();
+    APInt Imm = ImmValue.getInt();
+    APInt U = UValue.getInt();
+    unsigned ResultLen = AValue.getVectorLength();
+    SmallVector<APValue, 16> ResultElements;
+    ResultElements.reserve(ResultLen);
+
+    for (unsigned EltNum = 0; EltNum < ResultLen; ++EltNum) {
+      APInt ALane = AValue.getVectorElt(EltNum).getInt();
+      APInt BLane = BValue.getVectorElt(EltNum).getInt();
+      APInt CLane = CValue.getVectorElt(EltNum).getInt();
+
+      if (U[EltNum]) {
+        unsigned BitWidth = ALane.getBitWidth();
+        APInt ResLane(BitWidth, 0);
+
+        for (unsigned Bit = 0; Bit < BitWidth; ++Bit) {
+          unsigned ABit = ALane[Bit];
+          unsigned BBit = BLane[Bit];
+          unsigned CBit = CLane[Bit];
+
+          unsigned Idx = (ABit << 2) | (BBit << 1) | CBit;
+          ResLane.setBitVal(Bit, Imm[Idx]);
+        }
+        ResultElements.push_back(APValue(APSInt(ResLane, DestUnsigned)));
+      } else {
+        ResultElements.push_back(APValue(APSInt(ALane, DestUnsigned)));
+      }
+    }
+    return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+  }
+  case X86::BI__builtin_ia32_pternlogd128_maskz:
+  case X86::BI__builtin_ia32_pternlogd256_maskz:
+  case X86::BI__builtin_ia32_pternlogd512_maskz:
+  case X86::BI__builtin_ia32_pternlogq128_maskz:
+  case X86::BI__builtin_ia32_pternlogq256_maskz:
+  case X86::BI__builtin_ia32_pternlogq512_maskz: {
+    APValue AValue, BValue, CValue, ImmValue, UValue;
+    if (!EvaluateAsRValue(Info, E->getArg(0), AValue) ||
+        !EvaluateAsRValue(Info, E->getArg(1), BValue) ||
+        !EvaluateAsRValue(Info, E->getArg(2), CValue) ||
+        !EvaluateAsRValue(Info, E->getArg(3), ImmValue) ||
+        !EvaluateAsRValue(Info, E->getArg(4), UValue))
+      return false;
+
+    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
+    bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();
+    APInt Imm = ImmValue.getInt();
+    APInt U = UValue.getInt();
+    unsigned ResultLen = AValue.getVectorLength();
+    SmallVector<APValue, 16> ResultElements;
+    ResultElements.reserve(ResultLen);
+
+    for (unsigned EltNum = 0; EltNum < ResultLen; ++EltNum) {
+      APInt ALane = AValue.getVectorElt(EltNum).getInt();
+      APInt BLane = BValue.getVectorElt(EltNum).getInt();
+      APInt CLane = CValue.getVectorElt(EltNum).getInt();
+
+      unsigned BitWidth = ALane.getBitWidth();
+      APInt ResLane(BitWidth, 0);
+
+      if (U[EltNum]) {
+        for (unsigned Bit = 0; Bit < BitWidth; ++Bit) {
+          unsigned ABit = ALane[Bit];
+          unsigned BBit = BLane[Bit];
+          unsigned CBit = CLane[Bit];
+
+          unsigned Idx = (ABit << 2) | (BBit << 1) | CBit;
+          ResLane.setBitVal(Bit, Imm[Idx]);
+        }
+      }
+      ResultElements.push_back(APValue(APSInt(ResLane, DestUnsigned)));
+    }
+    return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+  }
+
   case Builtin::BI__builtin_elementwise_clzg:
   case Builtin::BI__builtin_elementwise_ctzg: {
     APValue SourceLHS;
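As the duplicated loop above shows, the maskz variants differ from the mask variants only in masked-off lanes, which become zero instead of passing A through. A hypothetical compile-time check under avx512vl (not from the patch):

    typedef long long v2di __attribute__((vector_size(16)));

    constexpr v2di x{-1, -1}, y{0, 0}, z{0, 0};
    // imm 0xF0 is simply "x"; lane mask 0x1 keeps lane 0 and zeroes lane 1.
    constexpr v2di r = __builtin_ia32_pternlogq128_maskz(x, y, z, 0xF0, 0x1);
    static_assert(r[0] == -1 && r[1] == 0, "");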
diff --git a/clang/lib/Basic/Targets/AMDGPU.h b/clang/lib/Basic/Targets/AMDGPU.h
index 552698a..dfcc7940 100644
--- a/clang/lib/Basic/Targets/AMDGPU.h
+++ b/clang/lib/Basic/Targets/AMDGPU.h
@@ -319,9 +319,12 @@ public:
       Opts["__opencl_c_images"] = true;
       Opts["__opencl_c_3d_image_writes"] = true;
       Opts["cl_khr_3d_image_writes"] = true;
+      Opts["__opencl_c_program_scope_global_variables"] = true;
 
-      Opts["__opencl_c_generic_address_space"] =
-          GPUKind >= llvm::AMDGPU::GK_GFX700;
+      if (GPUKind >= llvm::AMDGPU::GK_GFX700) {
+        Opts["__opencl_c_generic_address_space"] = true;
+        Opts["__opencl_c_device_enqueue"] = true;
+      }
     }
   }
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
index bbc45e5..24a5fc2 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
@@ -221,10 +221,9 @@ mlir::Value OpenACCRecipeBuilderBase::makeBoundsAlloca(
   return initialAlloca;
 }
 
-mlir::Value
-OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue,
-                                           mlir::Value bound,
-                                           mlir::Location loc, bool inverse) {
+std::pair<mlir::Value, mlir::Value> OpenACCRecipeBuilderBase::createBoundsLoop(
+    mlir::Value subscriptedValue, mlir::Value subscriptedValue2,
+    mlir::Value bound, mlir::Location loc, bool inverse) {
   mlir::Operation *bodyInsertLoc;
 
   mlir::Type itrTy = cgf.cgm.convertType(cgf.getContext().UnsignedLongLongTy);
@@ -249,7 +248,6 @@ OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue,
 
     return cir::PtrStrideOp::create(builder, loc, eltLoad.getType(), eltLoad,
                                     idxLoad);
-
   };
 
   auto forStmtBuilder = [&]() {
@@ -303,6 +301,8 @@ OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue,
 
         if (subscriptedValue)
           subscriptedValue = doSubscriptOp(subscriptedValue, load);
+        if (subscriptedValue2)
+          subscriptedValue2 = doSubscriptOp(subscriptedValue2, load);
         bodyInsertLoc = builder.createYield(loc);
       },
       /*stepBuilder=*/
@@ -325,7 +325,7 @@ OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue,
   // Leave the insertion point to be inside the body, so we can loop over
   // these things.
   builder.setInsertionPoint(bodyInsertLoc);
-  return subscriptedValue;
+  return {subscriptedValue, subscriptedValue2};
 }
 
 mlir::acc::ReductionOperator
@@ -434,7 +434,7 @@ void OpenACCRecipeBuilderBase::createInitRecipe(
     mlir::Location loc, mlir::Location locEnd, SourceRange exprRange,
     mlir::Value mainOp, mlir::Region &recipeInitRegion, size_t numBounds,
     llvm::ArrayRef<QualType> boundTypes, const VarDecl *allocaDecl,
-    QualType origType) {
+    QualType origType, bool emitInitExpr) {
   assert(allocaDecl && "Required recipe variable not set?");
   CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, allocaDecl};
 
@@ -464,14 +464,15 @@ void OpenACCRecipeBuilderBase::createInitRecipe(
     // initialize this variable correctly.
     CIRGenFunction::AutoVarEmission tempDeclEmission =
        cgf.emitAutoVarAlloca(*allocaDecl, builder.saveInsertionPoint());
-    cgf.emitAutoVarInit(tempDeclEmission);
+    if (emitInitExpr)
+      cgf.emitAutoVarInit(tempDeclEmission);
   } else {
     mlir::Value alloca = makeBoundsAlloca(
         block, exprRange, loc, allocaDecl->getName(), numBounds, boundTypes);
 
     // If the initializer is trivial, there is nothing to do here, so save
     // ourselves some effort.
-    if (allocaDecl->getInit() &&
+    if (emitInitExpr && allocaDecl->getInit() &&
         (!cgf.isTrivialInitializer(allocaDecl->getInit()) ||
          cgf.getContext().getLangOpts().getTrivialAutoVarInit() !=
              LangOptions::TrivialAutoVarInitKind::Uninitialized))
@@ -484,35 +485,42 @@ void OpenACCRecipeBuilderBase::createInitRecipe(
 
 void OpenACCRecipeBuilderBase::createFirstprivateRecipeCopy(
     mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp,
-    CIRGenFunction::AutoVarEmission tempDeclEmission,
-    mlir::acc::FirstprivateRecipeOp recipe, const VarDecl *varRecipe,
-    const VarDecl *temporary) {
-  mlir::Block *block =
-      createRecipeBlock(recipe.getCopyRegion(), mainOp.getType(), loc,
-                        /*numBounds=*/0, /*isInit=*/false);
-  builder.setInsertionPointToEnd(&recipe.getCopyRegion().back());
+    const VarDecl *allocaDecl, const VarDecl *temporary,
+    mlir::Region &copyRegion, size_t numBounds) {
+  mlir::Block *block = createRecipeBlock(copyRegion, mainOp.getType(), loc,
+                                         numBounds, /*isInit=*/false);
+  builder.setInsertionPointToEnd(&copyRegion.back());
   CIRGenFunction::LexicalScope ls(cgf, loc, block);
 
-  mlir::BlockArgument fromArg = block->getArgument(0);
-  mlir::BlockArgument toArg = block->getArgument(1);
+  mlir::Value fromArg = block->getArgument(0);
+  mlir::Value toArg = block->getArgument(1);
 
-  mlir::Type elementTy =
-      mlir::cast<cir::PointerType>(mainOp.getType()).getPointee();
+  llvm::MutableArrayRef<mlir::BlockArgument> boundsRange =
+      block->getArguments().drop_front(2);
 
-  // Set the address of the emission to be the argument, so that we initialize
-  // that instead of the variable in the other block.
-  tempDeclEmission.setAllocatedAddress(
-      Address{toArg, elementTy, cgf.getContext().getDeclAlign(varRecipe)});
+  for (mlir::BlockArgument boundArg : llvm::reverse(boundsRange))
+    std::tie(fromArg, toArg) =
+        createBoundsLoop(fromArg, toArg, boundArg, loc, /*inverse=*/false);
+
+  // Set up the 'to' address.
+  mlir::Type elementTy =
+      mlir::cast<cir::PointerType>(toArg.getType()).getPointee();
+  CIRGenFunction::AutoVarEmission tempDeclEmission(*allocaDecl);
   tempDeclEmission.emittedAsOffload = true;
+  tempDeclEmission.setAllocatedAddress(
+      Address{toArg, elementTy, cgf.getContext().getDeclAlign(allocaDecl)});
 
+  // Set up the 'from' address from the temporary.
   CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, temporary};
   cgf.setAddrOfLocalVar(
       temporary,
-      Address{fromArg, elementTy, cgf.getContext().getDeclAlign(varRecipe)});
-
+      Address{fromArg, elementTy, cgf.getContext().getDeclAlign(allocaDecl)});
   cgf.emitAutoVarInit(tempDeclEmission);
+
+  builder.setInsertionPointToEnd(&copyRegion.back());
   mlir::acc::YieldOp::create(builder, locEnd);
 }
 
+
 // This function generates the 'combiner' section for a reduction recipe. Note
 // that this function is not 'insertion point' clean, in that it alters the
 // insertion point to be inside of the 'combiner' section of the recipe, but
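For orientation, the firstprivate recipes being restructured here correspond to clauses like the following, where the array-section bound becomes an acc.bounds loop around the element-by-element copy (illustrative source, not from the patch):

    void scale(float *x, float a, int n) {
    // 'x[0:n]' is an OpenACC array section; its single bound becomes one
    // loop in the firstprivate recipe's copy region.
    #pragma acc parallel loop firstprivate(x[0:n])
      for (int i = 0; i < n; ++i)
        x[i] *= a;
    }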
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
index 21707ad..a5da744 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
@@ -49,14 +49,16 @@ protected:
   // Creates a loop through an 'acc.bounds', leaving the 'insertion' point to be
   // the inside of the loop body. Traverses LB->UB UNLESS `inverse` is set.
   // Returns the 'subscriptedValue' changed with the new bounds subscript.
+  std::pair<mlir::Value, mlir::Value>
+  createBoundsLoop(mlir::Value subscriptedValue, mlir::Value subscriptedValue2,
+                   mlir::Value bound, mlir::Location loc, bool inverse);
+
   mlir::Value createBoundsLoop(mlir::Value subscriptedValue, mlir::Value bound,
-                               mlir::Location loc, bool inverse);
+                               mlir::Location loc, bool inverse) {
+    return createBoundsLoop(subscriptedValue, {}, bound, loc, inverse).first;
+  }
+
   mlir::acc::ReductionOperator convertReductionOp(OpenACCReductionOperator op);
-  void createFirstprivateRecipeCopy(
-      mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp,
-      CIRGenFunction::AutoVarEmission tempDeclEmission,
-      mlir::acc::FirstprivateRecipeOp recipe, const VarDecl *varRecipe,
-      const VarDecl *temporary);
 
   // This function generates the 'combiner' section for a reduction recipe. Note
   // that this function is not 'insertion point' clean, in that it alters the
@@ -66,11 +68,19 @@ protected:
                                      mlir::Value mainOp,
                                      mlir::acc::ReductionRecipeOp recipe,
                                      size_t numBounds);
+
   void createInitRecipe(mlir::Location loc, mlir::Location locEnd,
                         SourceRange exprRange, mlir::Value mainOp,
                         mlir::Region &recipeInitRegion, size_t numBounds,
                         llvm::ArrayRef<QualType> boundTypes,
-                        const VarDecl *allocaDecl, QualType origType);
+                        const VarDecl *allocaDecl, QualType origType,
+                        bool emitInitExpr);
+
+  void createFirstprivateRecipeCopy(mlir::Location loc, mlir::Location locEnd,
+                                    mlir::Value mainOp,
+                                    const VarDecl *allocaDecl,
+                                    const VarDecl *temporary,
+                                    mlir::Region &copyRegion, size_t numBounds);
 
   void createRecipeDestroySection(mlir::Location loc, mlir::Location locEnd,
                                   mlir::Value mainOp, CharUnits alignment,
@@ -150,63 +160,6 @@ class OpenACCRecipeBuilder : OpenACCRecipeBuilderBase {
     return recipeName;
   }
 
-  // Create the 'init' section of the recipe, including the 'copy' section for
-  // 'firstprivate'. Note that this function is not 'insertion point' clean, in
-  // that it alters the insertion point to be inside of the 'destroy' section of
-  // the recipe, but doesn't restore it aftewards.
-  void createRecipeInitCopy(mlir::Location loc, mlir::Location locEnd,
-                            SourceRange exprRange, mlir::Value mainOp,
-                            RecipeTy recipe, const VarDecl *varRecipe,
-                            const VarDecl *temporary) {
-    // TODO: OpenACC: when we get the 'pointer' variants for
-    // firstprivate/reduction, this probably should be removed/split into
-    // functions for the BuilderBase.
-    assert(varRecipe && "Required recipe variable not set?");
-
-    CIRGenFunction::AutoVarEmission tempDeclEmission{
-        CIRGenFunction::AutoVarEmission::invalid()};
-    CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, varRecipe};
-
-    // Do the 'init' section of the recipe IR, which does an alloca, then the
-    // initialization (except for firstprivate).
-    mlir::Block *block =
-        createRecipeBlock(recipe.getInitRegion(), mainOp.getType(), loc,
-                          /*numBounds=*/0, /*isInit=*/true);
-    builder.setInsertionPointToEnd(&recipe.getInitRegion().back());
-    CIRGenFunction::LexicalScope ls(cgf, loc, block);
-
-    tempDeclEmission =
-        cgf.emitAutoVarAlloca(*varRecipe, builder.saveInsertionPoint());
-
-    // 'firstprivate' doesn't do its initialization in the 'init' section,
-    // instead it does it in the 'copy' section. SO, only do 'init' here for
-    // reduction.
-    if constexpr (std::is_same_v<RecipeTy, mlir::acc::ReductionRecipeOp>) {
-      // Unlike Private, the recipe here is always required as it has to do
-      // init, not just 'default' init.
-      if (!varRecipe->getInit())
-        cgf.cgm.errorNYI(exprRange, "reduction init recipe");
-      cgf.emitAutoVarInit(tempDeclEmission);
-    }
-
-    mlir::acc::YieldOp::create(builder, locEnd);
-
-    if constexpr (std::is_same_v<RecipeTy, mlir::acc::FirstprivateRecipeOp>) {
-      if (!varRecipe->getInit()) {
-        // If we don't have any initialization recipe, we failed during Sema to
-        // initialize this correctly. If we disable the
-        // Sema::TentativeAnalysisScopes in SemaOpenACC::CreateInitRecipe, it'll
-        // emit an error to tell us. However, emitting those errors during
-        // production is a violation of the standard, so we cannot do them.
-        cgf.cgm.errorNYI(
-            exprRange, "firstprivate copy-init recipe not properly generated");
-      }
-
-      createFirstprivateRecipeCopy(loc, locEnd, mainOp, tempDeclEmission,
-                                   recipe, varRecipe, temporary);
-    }
-  }
-
 public:
   OpenACCRecipeBuilder(CIRGen::CIRGenFunction &cgf,
                        CIRGen::CIRGenBuilderTy &builder)
@@ -221,19 +174,6 @@ public:
                BuiltinType::ArraySection) &&
            "array section shouldn't make it to recipe creation");
 
-    // TODO: OpenACC: This is a bit of a hackery to get this to not change for
-    // the non-private recipes. This will be removed soon, when we get this
-    // 'right' for firstprivate and reduction.
-    if constexpr (std::is_same_v<RecipeTy, mlir::acc::FirstprivateRecipeOp>) {
-      if (numBounds) {
-        cgf.cgm.errorNYI(varRef->getSourceRange(),
-                         "firstprivate-init with bounds");
-      }
-      boundTypes = {};
-      numBounds = 0;
-      origType = baseType;
-    }
-
     mlir::ModuleOp mod = builder.getBlock()
                              ->getParent()
                              ->template getParentOfType<mlir::ModuleOp>();
@@ -262,21 +202,20 @@ public:
     if constexpr (std::is_same_v<RecipeTy, mlir::acc::PrivateRecipeOp>) {
       createInitRecipe(loc, locEnd, varRef->getSourceRange(), mainOp,
                        recipe.getInitRegion(), numBounds, boundTypes, varRecipe,
-                       origType);
+                       origType, /*emitInitExpr=*/true);
     } else if constexpr (std::is_same_v<RecipeTy,
                                         mlir::acc::ReductionRecipeOp>) {
       createInitRecipe(loc, locEnd, varRef->getSourceRange(), mainOp,
                        recipe.getInitRegion(), numBounds, boundTypes, varRecipe,
-                       origType);
+                       origType, /*emitInitExpr=*/true);
       createReductionRecipeCombiner(loc, locEnd, mainOp, recipe, numBounds);
     } else {
       static_assert(std::is_same_v<RecipeTy, mlir::acc::FirstprivateRecipeOp>);
-      // TODO: OpenACC: we probably want this to call createInitRecipe as well,
-      // but do so in a way that omits the 'initialization', so that we can do
-      // it separately, since it belongs in the 'copy' region. It also might
-      // need a way of getting the tempDeclEmission out of it for that purpose.
-      createRecipeInitCopy(loc, locEnd, varRef->getSourceRange(), mainOp,
-                           recipe, varRecipe, temporary);
+      createInitRecipe(loc, locEnd, varRef->getSourceRange(), mainOp,
+                       recipe.getInitRegion(), numBounds, boundTypes, varRecipe,
+                       origType, /*emitInitExpr=*/false);
+      createFirstprivateRecipeCopy(loc, locEnd, mainOp, varRecipe, temporary,
+                                   recipe.getCopyRegion(), numBounds);
     }
 
     if (origType.isDestructedType())
diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
index c15637d..2eeef81 100644
--- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -8,18 +8,39 @@
 
 #include "PassDetail.h"
 #include "clang/AST/ASTContext.h"
+#include "clang/Basic/Module.h"
 #include "clang/Basic/TargetInfo.h"
 #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h"
 #include "clang/CIR/Dialect/IR/CIRDialect.h"
 #include "clang/CIR/Dialect/IR/CIROpsEnums.h"
 #include "clang/CIR/Dialect/Passes.h"
 #include "clang/CIR/MissingFeatures.h"
+#include "llvm/Support/Path.h"
 
 #include <memory>
 
 using namespace mlir;
 using namespace cir;
 
+static SmallString<128> getTransformedFileName(mlir::ModuleOp mlirModule) {
+  SmallString<128> fileName;
+
+  if (mlirModule.getSymName())
+    fileName = llvm::sys::path::filename(mlirModule.getSymName()->str());
+
+  if (fileName.empty())
+    fileName = "<null>";
+
+  for (size_t i = 0; i < fileName.size(); ++i) {
+    // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
+    // to be the set of C preprocessing numbers.
+    if (!clang::isPreprocessingNumberBody(fileName[i]))
+      fileName[i] = '_';
+  }
+
+  return fileName;
+}
+
 namespace {
 struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
   LoweringPreparePass() = default;
@@ -30,9 +51,16 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
   void lowerComplexDivOp(cir::ComplexDivOp op);
   void lowerComplexMulOp(cir::ComplexMulOp op);
   void lowerUnaryOp(cir::UnaryOp op);
+  void lowerGlobalOp(cir::GlobalOp op);
   void lowerArrayDtor(cir::ArrayDtor op);
   void lowerArrayCtor(cir::ArrayCtor op);
 
+  /// Build the function that initializes the specified global
+  cir::FuncOp buildCXXGlobalVarDeclInitFunc(cir::GlobalOp op);
+
+  /// Build a module init function that calls all the dynamic initializers.
+  void buildCXXGlobalInitFunc();
+
   cir::FuncOp buildRuntimeFunction(
       mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc,
       cir::FuncType type,
@@ -47,6 +75,10 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
   /// Tracks current module.
   mlir::ModuleOp mlirModule;
 
+  /// Tracks existing dynamic initializers.
+  llvm::StringMap<uint32_t> dynamicInitializerNames;
+  llvm::SmallVector<cir::FuncOp> dynamicInitializers;
+
   void setASTContext(clang::ASTContext *c) { astCtx = c; }
 };
 
@@ -589,6 +621,111 @@ void LoweringPreparePass::lowerUnaryOp(cir::UnaryOp op) {
   op.erase();
 }
 
+cir::FuncOp
+LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(cir::GlobalOp op) {
+  // TODO(cir): Store this in the GlobalOp.
+  // This should come from the MangleContext, but for now I'm hardcoding it.
+  SmallString<256> fnName("__cxx_global_var_init");
+  // Get a unique name
+  uint32_t cnt = dynamicInitializerNames[fnName]++;
+  if (cnt)
+    fnName += "." + llvm::Twine(cnt).str();
+
+  // Create a variable initialization function.
+  CIRBaseBuilderTy builder(getContext());
+  builder.setInsertionPointAfter(op);
+  auto fnType = cir::FuncType::get({}, builder.getVoidTy());
+  FuncOp f = buildRuntimeFunction(builder, fnName, op.getLoc(), fnType,
+                                  cir::GlobalLinkageKind::InternalLinkage);
+
+  // Move over the initialzation code of the ctor region.
+  mlir::Block *entryBB = f.addEntryBlock();
+  if (!op.getCtorRegion().empty()) {
+    mlir::Block &block = op.getCtorRegion().front();
+    entryBB->getOperations().splice(entryBB->begin(), block.getOperations(),
+                                    block.begin(), std::prev(block.end()));
+  }
+
+  // Register the destructor call with __cxa_atexit
+  mlir::Region &dtorRegion = op.getDtorRegion();
+  if (!dtorRegion.empty()) {
+    assert(!cir::MissingFeatures::opGlobalDtorLowering());
+    llvm_unreachable("dtor region lowering is NYI");
+  }
+
+  // Replace cir.yield with cir.return
+  builder.setInsertionPointToEnd(entryBB);
+  mlir::Operation *yieldOp = nullptr;
+  if (!op.getCtorRegion().empty()) {
+    mlir::Block &block = op.getCtorRegion().front();
+    yieldOp = &block.getOperations().back();
+  } else {
+    assert(!cir::MissingFeatures::opGlobalDtorLowering());
+    llvm_unreachable("dtor region lowering is NYI");
+  }
+
+  assert(isa<YieldOp>(*yieldOp));
+  cir::ReturnOp::create(builder, yieldOp->getLoc());
+  return f;
+}
+
+void LoweringPreparePass::lowerGlobalOp(GlobalOp op) {
+  mlir::Region &ctorRegion = op.getCtorRegion();
+  mlir::Region &dtorRegion = op.getDtorRegion();
+
+  if (!ctorRegion.empty() || !dtorRegion.empty()) {
+    // Build a variable initialization function and move the initialzation code
+    // in the ctor region over.
+    cir::FuncOp f = buildCXXGlobalVarDeclInitFunc(op);
+
+    // Clear the ctor and dtor region
+    ctorRegion.getBlocks().clear();
+    dtorRegion.getBlocks().clear();
+
+    assert(!cir::MissingFeatures::astVarDeclInterface());
+    dynamicInitializers.push_back(f);
+  }
+
+  assert(!cir::MissingFeatures::opGlobalAnnotations());
+}
+
+void LoweringPreparePass::buildCXXGlobalInitFunc() {
+  if (dynamicInitializers.empty())
+    return;
+
+  assert(!cir::MissingFeatures::opGlobalCtorList());
+
+  SmallString<256> fnName;
+  // Include the filename in the symbol name. Including "sub_" matches gcc
+  // and makes sure these symbols appear lexicographically behind the symbols
+  // with priority (TBD). Module implementation units behave the same
+  // way as a non-modular TU with imports.
+  // TODO: check CXX20ModuleInits
+  if (astCtx->getCurrentNamedModule() &&
+      !astCtx->getCurrentNamedModule()->isModuleImplementation()) {
+    llvm::raw_svector_ostream out(fnName);
+    std::unique_ptr<clang::MangleContext> mangleCtx(
+        astCtx->createMangleContext());
+    cast<clang::ItaniumMangleContext>(*mangleCtx)
+        .mangleModuleInitializer(astCtx->getCurrentNamedModule(), out);
+  } else {
+    fnName += "_GLOBAL__sub_I_";
+    fnName += getTransformedFileName(mlirModule);
+  }
+
+  CIRBaseBuilderTy builder(getContext());
+  builder.setInsertionPointToEnd(&mlirModule.getBodyRegion().back());
+  auto fnType = cir::FuncType::get({}, builder.getVoidTy());
+  cir::FuncOp f =
+      buildRuntimeFunction(builder, fnName, mlirModule.getLoc(), fnType,
+                           cir::GlobalLinkageKind::ExternalLinkage);
+  builder.setInsertionPointToStart(f.addEntryBlock());
+  for (cir::FuncOp &f : dynamicInitializers)
+    builder.createCallOp(f.getLoc(), f, {});
+
+  cir::ReturnOp::create(builder, f.getLoc());
+}
+
 static void lowerArrayDtorCtorIntoLoop(cir::CIRBaseBuilderTy &builder,
                                        clang::ASTContext *astCtx,
                                        mlir::Operation *op, mlir::Type eltTy,
@@ -691,6 +828,8 @@ void LoweringPreparePass::runOnOp(mlir::Operation *op) {
     lowerComplexDivOp(complexDiv);
  else if (auto complexMul = mlir::dyn_cast<cir::ComplexMulOp>(op))
     lowerComplexMulOp(complexMul);
+  else if (auto glob = mlir::dyn_cast<cir::GlobalOp>(op))
+    lowerGlobalOp(glob);
   else if (auto unary = mlir::dyn_cast<cir::UnaryOp>(op))
     lowerUnaryOp(unary);
 }
@@ -704,12 +843,15 @@ void LoweringPreparePass::runOnOperation() {
 
   op->walk([&](mlir::Operation *op) {
     if (mlir::isa<cir::ArrayCtor, cir::ArrayDtor, cir::CastOp,
-                  cir::ComplexMulOp, cir::ComplexDivOp, cir::UnaryOp>(op))
+                  cir::ComplexMulOp, cir::ComplexDivOp, cir::GlobalOp,
+                  cir::UnaryOp>(op))
       opsToTransform.push_back(op);
   });
 
   for (mlir::Operation *o : opsToTransform)
     runOnOp(o);
+
+  buildCXXGlobalInitFunc();
 }
 
 std::unique_ptr<Pass> mlir::createLoweringPreparePass() {
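The lowering added above mirrors what classic CodeGen does for dynamically initialized globals: one __cxx_global_var_init function per variable, all called from a single _GLOBAL__sub_I_<transformed file name> entry point. At the source level (illustrative example):

    struct Logger {
      Logger(); // non-trivial constructor forces a ctor region on the cir.global
    };

    Logger logger; // init code moves to __cxx_global_var_init, which is then
                   // called from the _GLOBAL__sub_I_ function built by the pass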
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index e6e4947..9f30287 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -6784,29 +6784,26 @@ LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
   return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
 }
 
-void CodeGenFunction::FlattenAccessAndType(
-    Address Addr, QualType AddrType,
-    SmallVectorImpl<std::pair<Address, llvm::Value *>> &AccessList,
-    SmallVectorImpl<QualType> &FlatTypes) {
-  // WorkList is list of type we are processing + the Index List to access
-  // the field of that type in Addr for use in a GEP
-  llvm::SmallVector<std::pair<QualType, llvm::SmallVector<llvm::Value *, 4>>,
-                    16>
+void CodeGenFunction::FlattenAccessAndTypeLValue(
+    LValue Val, SmallVectorImpl<LValue> &AccessList) {
+
+  llvm::SmallVector<
+      std::tuple<LValue, QualType, llvm::SmallVector<llvm::Value *, 4>>, 16>
       WorkList;
   llvm::IntegerType *IdxTy = llvm::IntegerType::get(getLLVMContext(), 32);
-  // Addr should be a pointer so we need to 'dereference' it
-  WorkList.push_back({AddrType, {llvm::ConstantInt::get(IdxTy, 0)}});
+  WorkList.push_back({Val, Val.getType(), {llvm::ConstantInt::get(IdxTy, 0)}});
 
   while (!WorkList.empty()) {
-    auto [T, IdxList] = WorkList.pop_back_val();
+    auto [LVal, T, IdxList] = WorkList.pop_back_val();
     T = T.getCanonicalType().getUnqualifiedType();
     assert(!isa<MatrixType>(T) && "Matrix types not yet supported in HLSL");
+
     if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) {
       uint64_t Size = CAT->getZExtSize();
       for (int64_t I = Size - 1; I > -1; I--) {
         llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
         IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I));
-        WorkList.emplace_back(CAT->getElementType(), IdxListCopy);
+        WorkList.emplace_back(LVal, CAT->getElementType(), IdxListCopy);
       }
     } else if (const auto *RT = dyn_cast<RecordType>(T)) {
       const RecordDecl *Record = RT->getOriginalDecl()->getDefinitionOrSelf();
@@ -6814,44 +6811,75 @@ void CodeGenFunction::FlattenAccessAndType(
 
       const CXXRecordDecl *CXXD = dyn_cast<CXXRecordDecl>(Record);
 
-      llvm::SmallVector<QualType, 16> FieldTypes;
+      llvm::SmallVector<
+          std::tuple<LValue, QualType, llvm::SmallVector<llvm::Value *, 4>>, 16>
+          ReverseList;
 
       if (CXXD && CXXD->isStandardLayout())
         Record = CXXD->getStandardLayoutBaseWithFields();
 
       // deal with potential base classes
       if (CXXD && !CXXD->isStandardLayout()) {
-        for (auto &Base : CXXD->bases())
-          FieldTypes.push_back(Base.getType());
+        if (CXXD->getNumBases() > 0) {
+          assert(CXXD->getNumBases() == 1 &&
+                 "HLSL doesn't support multiple inheritance.");
+          auto Base = CXXD->bases_begin();
+          llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
+          IdxListCopy.push_back(llvm::ConstantInt::get(
+              IdxTy, 0)); // base struct should be at index zero
+          ReverseList.emplace_back(LVal, Base->getType(), IdxListCopy);
+        }
       }
 
-      for (auto *FD : Record->fields())
-        FieldTypes.push_back(FD->getType());
+      const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(Record);
 
-      for (int64_t I = FieldTypes.size() - 1; I > -1; I--) {
-        llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
-        IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I));
-        WorkList.insert(WorkList.end(), {FieldTypes[I], IdxListCopy});
+      llvm::Type *LLVMT = ConvertTypeForMem(T);
+      CharUnits Align = getContext().getTypeAlignInChars(T);
+      LValue RLValue;
+      bool createdGEP = false;
+      for (auto *FD : Record->fields()) {
+        if (FD->isBitField()) {
+          if (FD->isUnnamedBitField())
+            continue;
+          if (!createdGEP) {
+            createdGEP = true;
+            Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList,
+                                                    LLVMT, Align, "gep");
+            RLValue = MakeAddrLValue(GEP, T);
+          }
+          LValue FieldLVal = EmitLValueForField(RLValue, FD, true);
+          ReverseList.push_back({FieldLVal, FD->getType(), {}});
+        } else {
+          llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
+          IdxListCopy.push_back(
+              llvm::ConstantInt::get(IdxTy, Layout.getLLVMFieldNo(FD)));
+          ReverseList.emplace_back(LVal, FD->getType(), IdxListCopy);
+        }
       }
+
+      std::reverse(ReverseList.begin(), ReverseList.end());
+      llvm::append_range(WorkList, ReverseList);
     } else if (const auto *VT = dyn_cast<VectorType>(T)) {
       llvm::Type *LLVMT = ConvertTypeForMem(T);
       CharUnits Align = getContext().getTypeAlignInChars(T);
-      Address GEP =
-          Builder.CreateInBoundsGEP(Addr, IdxList, LLVMT, Align, "vector.gep");
+      Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList, LLVMT,
+                                              Align, "vector.gep");
+      LValue Base = MakeAddrLValue(GEP, T);
       for (unsigned I = 0, E = VT->getNumElements(); I < E; I++) {
-        llvm::Value *Idx = llvm::ConstantInt::get(IdxTy, I);
-        // gep on vector fields is not recommended so combine gep with
-        // extract/insert
-        AccessList.emplace_back(GEP, Idx);
-        FlatTypes.push_back(VT->getElementType());
+        llvm::Constant *Idx = llvm::ConstantInt::get(IdxTy, I);
+        LValue LV =
+            LValue::MakeVectorElt(Base.getAddress(), Idx, VT->getElementType(),
+                                  Base.getBaseInfo(), TBAAAccessInfo());
+        AccessList.emplace_back(LV);
       }
-    } else {
-      // a scalar/builtin type
-      llvm::Type *LLVMT = ConvertTypeForMem(T);
-      CharUnits Align = getContext().getTypeAlignInChars(T);
-      Address GEP =
-          Builder.CreateInBoundsGEP(Addr, IdxList, LLVMT, Align, "gep");
-      AccessList.emplace_back(GEP, nullptr);
-      FlatTypes.push_back(T);
+    } else { // a scalar/builtin type
+      if (!IdxList.empty()) {
+        llvm::Type *LLVMT = ConvertTypeForMem(T);
+        CharUnits Align = getContext().getTypeAlignInChars(T);
+        Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList,
+                                                LLVMT, Align, "gep");
+        AccessList.emplace_back(MakeAddrLValue(GEP, T));
+      } else // must be a bitfield we already created an lvalue for
        AccessList.emplace_back(LVal);
     }
   }
 }
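A C-style sketch of what the reworked flattening walks (illustrative, not from the patch): fields are visited in declaration order, and a named bit-field now yields a proper field LValue via EmitLValueForField instead of a raw GEP:

    struct Pair { int x; int y : 8; };   // y is the bit-field case handled above
    struct Wrap { Pair p; float v[2]; }; // flattens to: p.x, p.y, v[0], v[1]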
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index b8150a2..07b9aeb 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -488,100 +488,62 @@ static bool isTrivialFiller(Expr *E) {
   return false;
 }
 
-static void EmitHLSLAggregateSplatCast(CodeGenFunction &CGF, Address DestVal,
-                                       QualType DestTy, llvm::Value *SrcVal,
-                                       QualType SrcTy, SourceLocation Loc) {
+// emit an elementwise cast where the RHS is a scalar or vector
+// or emit an aggregate splat cast
+static void EmitHLSLScalarElementwiseAndSplatCasts(CodeGenFunction &CGF,
+                                                   LValue DestVal,
+                                                   llvm::Value *SrcVal,
+                                                   QualType SrcTy,
+                                                   SourceLocation Loc) {
   // Flatten our destination
-  SmallVector<QualType> DestTypes; // Flattened type
-  SmallVector<std::pair<Address, llvm::Value *>, 16> StoreGEPList;
-  // ^^ Flattened accesses to DestVal we want to store into
-  CGF.FlattenAccessAndType(DestVal, DestTy, StoreGEPList, DestTypes);
-
-  assert(SrcTy->isScalarType() && "Invalid HLSL Aggregate splat cast.");
-  for (unsigned I = 0, Size = StoreGEPList.size(); I < Size; ++I) {
-    llvm::Value *Cast =
-        CGF.EmitScalarConversion(SrcVal, SrcTy, DestTypes[I], Loc);
-
-    // store back
-    llvm::Value *Idx = StoreGEPList[I].second;
-    if (Idx) {
-      llvm::Value *V =
-          CGF.Builder.CreateLoad(StoreGEPList[I].first, "load.for.insert");
-      Cast = CGF.Builder.CreateInsertElement(V, Cast, Idx);
-    }
-    CGF.Builder.CreateStore(Cast, StoreGEPList[I].first);
-  }
-}
-
-// emit a flat cast where the RHS is a scalar, including vector
-static void EmitHLSLScalarFlatCast(CodeGenFunction &CGF, Address DestVal,
-                                   QualType DestTy, llvm::Value *SrcVal,
-                                   QualType SrcTy, SourceLocation Loc) {
-  // Flatten our destination
-  SmallVector<QualType, 16> DestTypes; // Flattened type
-  SmallVector<std::pair<Address, llvm::Value *>, 16> StoreGEPList;
-  // ^^ Flattened accesses to DestVal we want to store into
-  CGF.FlattenAccessAndType(DestVal, DestTy, StoreGEPList, DestTypes);
-
-  assert(SrcTy->isVectorType() && "HLSL Flat cast doesn't handle splatting.");
-  const VectorType *VT = SrcTy->getAs<VectorType>();
-  SrcTy = VT->getElementType();
-  assert(StoreGEPList.size() <= VT->getNumElements() &&
-         "Cannot perform HLSL flat cast when vector source \
-         object has less elements than flattened destination \
-         object.");
-  for (unsigned I = 0, Size = StoreGEPList.size(); I < Size; I++) {
-    llvm::Value *Load = CGF.Builder.CreateExtractElement(SrcVal, I, "vec.load");
+  SmallVector<LValue, 16> StoreList;
+  CGF.FlattenAccessAndTypeLValue(DestVal, StoreList);
+
+  bool isVector = false;
+  if (auto *VT = SrcTy->getAs<VectorType>()) {
+    isVector = true;
+    SrcTy = VT->getElementType();
+    assert(StoreList.size() <= VT->getNumElements() &&
+           "Cannot perform HLSL flat cast when vector source \
+           object has less elements than flattened destination \
+           object.");
+  }
+
+  for (unsigned I = 0, Size = StoreList.size(); I < Size; I++) {
+    LValue DestLVal = StoreList[I];
+    llvm::Value *Load =
+        isVector ? CGF.Builder.CreateExtractElement(SrcVal, I, "vec.load")
+                 : SrcVal;
     llvm::Value *Cast =
-        CGF.EmitScalarConversion(Load, SrcTy, DestTypes[I], Loc);
-
-    // store back
-    llvm::Value *Idx = StoreGEPList[I].second;
-    if (Idx) {
-      llvm::Value *V =
-          CGF.Builder.CreateLoad(StoreGEPList[I].first, "load.for.insert");
-      Cast = CGF.Builder.CreateInsertElement(V, Cast, Idx);
-    }
-    CGF.Builder.CreateStore(Cast, StoreGEPList[I].first);
+        CGF.EmitScalarConversion(Load, SrcTy, DestLVal.getType(), Loc);
+    CGF.EmitStoreThroughLValue(RValue::get(Cast), DestLVal);
   }
 }
 
 // emit a flat cast where the RHS is an aggregate
-static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, Address DestVal,
-                                    QualType DestTy, Address SrcVal,
-                                    QualType SrcTy, SourceLocation Loc) {
+static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue DestVal,
+                                    LValue SrcVal, SourceLocation Loc) {
   // Flatten our destination
-  SmallVector<QualType, 16> DestTypes; // Flattened type
-  SmallVector<std::pair<Address, llvm::Value *>, 16> StoreGEPList;
-  // ^^ Flattened accesses to DestVal we want to store into
-  CGF.FlattenAccessAndType(DestVal, DestTy, StoreGEPList, DestTypes);
+  SmallVector<LValue, 16> StoreList;
+  CGF.FlattenAccessAndTypeLValue(DestVal, StoreList);
   // Flatten our src
-  SmallVector<QualType, 16> SrcTypes; // Flattened type
-  SmallVector<std::pair<Address, llvm::Value *>, 16> LoadGEPList;
-  // ^^ Flattened accesses to SrcVal we want to load from
-  CGF.FlattenAccessAndType(SrcVal, SrcTy, LoadGEPList, SrcTypes);
+  SmallVector<LValue, 16> LoadList;
+  CGF.FlattenAccessAndTypeLValue(SrcVal, LoadList);
 
-  assert(StoreGEPList.size() <= LoadGEPList.size() &&
-         "Cannot perform HLSL flat cast when flattened source object \
+  assert(StoreList.size() <= LoadList.size() &&
+         "Cannot perform HLSL elementwise cast when flattened source object \
          has less elements than flattened destination object.");
-  // apply casts to what we load from LoadGEPList
+  // apply casts to what we load from LoadList
   // and store result in Dest
-  for (unsigned I = 0, E = StoreGEPList.size(); I < E; I++) {
-    llvm::Value *Idx = LoadGEPList[I].second;
-    llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[I].first, "load");
-    Load =
-        Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract") : Load;
-    llvm::Value *Cast =
-        CGF.EmitScalarConversion(Load, SrcTypes[I], DestTypes[I], Loc);
-
-    // store back
-    Idx = StoreGEPList[I].second;
-    if (Idx) {
-      llvm::Value *V =
-          CGF.Builder.CreateLoad(StoreGEPList[I].first, "load.for.insert");
-      Cast = CGF.Builder.CreateInsertElement(V, Cast, Idx);
-    }
-    CGF.Builder.CreateStore(Cast, StoreGEPList[I].first);
+  for (unsigned I = 0, E = StoreList.size(); I < E; I++) {
+    LValue DestLVal = StoreList[I];
+    LValue SrcLVal = LoadList[I];
+    RValue RVal = CGF.EmitLoadOfLValue(SrcLVal, Loc);
+    assert(RVal.isScalar() && "All flattened source values should be scalars");
+    llvm::Value *Val = RVal.getScalarVal();
+    llvm::Value *Cast = CGF.EmitScalarConversion(Val, SrcLVal.getType(),
+                                                 DestLVal.getType(), Loc);
+    CGF.EmitStoreThroughLValue(RValue::get(Cast), DestLVal);
   }
 }
 
@@ -988,31 +950,33 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
     Expr *Src = E->getSubExpr();
     QualType SrcTy = Src->getType();
     RValue RV = CGF.EmitAnyExpr(Src);
-    QualType DestTy = E->getType();
-    Address DestVal = Dest.getAddress();
+    LValue DestLVal = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
     SourceLocation Loc = E->getExprLoc();
 
-    assert(RV.isScalar() && "RHS of HLSL splat cast must be a scalar.");
+    assert(RV.isScalar() && SrcTy->isScalarType() &&
+           "RHS of HLSL splat cast must be a scalar.");
     llvm::Value *SrcVal = RV.getScalarVal();
-    EmitHLSLAggregateSplatCast(CGF, DestVal, DestTy, SrcVal, SrcTy, Loc);
+    EmitHLSLScalarElementwiseAndSplatCasts(CGF, DestLVal, SrcVal, SrcTy, Loc);
     break;
   }
   case CK_HLSLElementwiseCast: {
     Expr *Src = E->getSubExpr();
     QualType SrcTy = Src->getType();
     RValue RV = CGF.EmitAnyExpr(Src);
-    QualType DestTy = E->getType();
-    Address DestVal = Dest.getAddress();
+    LValue DestLVal = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
     SourceLocation Loc = E->getExprLoc();
 
     if (RV.isScalar()) {
       llvm::Value *SrcVal = RV.getScalarVal();
-      EmitHLSLScalarFlatCast(CGF, DestVal, DestTy, SrcVal, SrcTy, Loc);
+      assert(SrcTy->isVectorType() &&
+             "HLSL Elementwise cast doesn't handle splatting.");
+      EmitHLSLScalarElementwiseAndSplatCasts(CGF, DestLVal, SrcVal, SrcTy, Loc);
     } else {
       assert(RV.isAggregate() &&
             "Can't perform HLSL Aggregate cast on a complex type.");
       Address SrcVal = RV.getAggregateAddress();
-      EmitHLSLElementwiseCast(CGF, DestVal, DestTy, SrcVal, SrcTy, Loc);
+      EmitHLSLElementwiseCast(CGF, DestLVal, CGF.MakeAddrLValue(SrcVal, SrcTy),
+                              Loc);
     }
     break;
   }
// if it's a vector, create a temp alloca to store into and return that - if (auto *VecTy = LHSTy->getAs<VectorType>()) { - assert(SrcTypes.size() >= VecTy->getNumElements() && - "Flattened type on RHS must have more elements than vector on LHS."); + if (auto *VecTy = DestTy->getAs<VectorType>()) { + assert(LoadList.size() >= VecTy->getNumElements() && + "Flattened type on RHS must have the same number or more elements " + "than vector on LHS."); llvm::Value *V = - CGF.Builder.CreateLoad(CGF.CreateIRTemp(LHSTy, "flatcast.tmp")); + CGF.Builder.CreateLoad(CGF.CreateIRTemp(DestTy, "flatcast.tmp")); // write to V. for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) { - llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[I].first, "load"); - llvm::Value *Idx = LoadGEPList[I].second; - Load = Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract") - : Load; - llvm::Value *Cast = CGF.EmitScalarConversion( - Load, SrcTypes[I], VecTy->getElementType(), Loc); + RValue RVal = CGF.EmitLoadOfLValue(LoadList[I], Loc); + assert(RVal.isScalar() && + "All flattened source values should be scalars."); + llvm::Value *Cast = + CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[I].getType(), + VecTy->getElementType(), Loc); V = CGF.Builder.CreateInsertElement(V, Cast, I); } return V; } - // i its a builtin just do an extract element or load. - assert(LHSTy->isBuiltinType() && + // if it's a builtin, just do an extract element or load. + assert(DestTy->isBuiltinType() && "Destination type must be a vector or builtin type."); - llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[0].first, "load"); - llvm::Value *Idx = LoadGEPList[0].second; - Load = - Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract") : Load; - return CGF.EmitScalarConversion(Load, LHSTy, SrcTypes[0], Loc); + RValue RVal = CGF.EmitLoadOfLValue(LoadList[0], Loc); + assert(RVal.isScalar() && "All flattened source values should be scalars."); + return CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[0].getType(), + DestTy, Loc); }
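
The rewritten EmitHLSLElementwiseCast above (and its aggregate counterpart earlier in this patch) reduce to one pattern: flatten each operand into an ordered list of scalar lvalues, then convert source element I to the type of destination element I and store it. A minimal C++ model of those cast semantics; this is a sketch only, the struct types are hypothetical, and nothing below is a clang API:

    struct Src { int a; float b; double c; };   // flattens to {int, float, double}
    struct Dst { float x; int y; };             // flattens to {float, int}

    // The source may have more flattened elements than the destination
    // (the asserts above allow exactly that); leading elements are
    // converted, trailing ones are dropped.
    constexpr Dst elementwise_cast(const Src &s) {
      Dst d{};
      d.x = static_cast<float>(s.a);  // element 0: int   -> float
      d.y = static_cast<int>(s.b);    // element 1: float -> int
      return d;                       // s.c has no destination slot
    }
    static_assert(elementwise_cast({7, 2.5f, 9.0}).y == 2, "float->int truncates");

// VisitCastExpr - Emit code for an explicit or implicit cast.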
Implicit casts @@ -2954,12 +2952,11 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_HLSLElementwiseCast: { RValue RV = CGF.EmitAnyExpr(E); SourceLocation Loc = CE->getExprLoc(); - QualType SrcTy = E->getType(); assert(RV.isAggregate() && "Not a valid HLSL Elementwise Cast."); // RHS is an aggregate - Address SrcVal = RV.getAggregateAddress(); - return EmitHLSLElementwiseCast(CGF, SrcVal, SrcTy, DestTy, Loc); + LValue SrcVal = CGF.MakeAddrLValue(RV.getAggregateAddress(), E->getType()); + return EmitHLSLElementwiseCast(CGF, SrcVal, DestTy, Loc); } } // end of switch diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index f0565c1..99de6e1 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -4464,10 +4464,8 @@ public: AggValueSlot slot = AggValueSlot::ignored()); LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e); - void FlattenAccessAndType( - Address Addr, QualType AddrTy, - SmallVectorImpl<std::pair<Address, llvm::Value *>> &AccessList, - SmallVectorImpl<QualType> &FlatTypes); + void FlattenAccessAndTypeLValue(LValue LVal, + SmallVectorImpl<LValue> &AccessList); llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar); diff --git a/clang/lib/Headers/avx512fp16intrin.h b/clang/lib/Headers/avx512fp16intrin.h index 4bd7981..d951ba0 100644 --- a/clang/lib/Headers/avx512fp16intrin.h +++ b/clang/lib/Headers/avx512fp16intrin.h @@ -41,7 +41,8 @@ typedef _Float16 __m512h_u __attribute__((__vector_size__(64), __aligned__(1))); #define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 #endif -static __inline__ _Float16 __DEFAULT_FN_ATTRS512 _mm512_cvtsh_h(__m512h __a) { +static __inline__ _Float16 __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_cvtsh_h(__m512h __a) { return __a[0]; } diff --git a/clang/lib/Headers/avx512vlintrin.h b/clang/lib/Headers/avx512vlintrin.h index 754f43a..965741f 100644 --- a/clang/lib/Headers/avx512vlintrin.h +++ b/clang/lib/Headers/avx512vlintrin.h @@ -7330,9 +7330,8 @@ _mm256_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) __builtin_ia32_pmovusqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_cvtepi32_epi8 (__m128i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_cvtepi32_epi8(__m128i __A) { return (__m128i)__builtin_shufflevector( __builtin_convertvector((__v4si)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7); @@ -7360,9 +7359,8 @@ _mm_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) __builtin_ia32_pmovdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi32_epi8 (__m256i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_cvtepi32_epi8(__m256i __A) { return (__m128i)__builtin_shufflevector( __builtin_convertvector((__v8si)__A, __v8qi), (__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, @@ -7370,8 +7368,7 @@ _mm256_cvtepi32_epi8 (__m256i __A) } static __inline__ __m128i __DEFAULT_FN_ATTRS256 -_mm256_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A) -{ +_mm256_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A, (__v16qi) __O, __M); } @@ -7390,9 +7387,8 @@ _mm256_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) __builtin_ia32_pmovdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M); } -static 
__inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_cvtepi32_epi16 (__m128i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_cvtepi32_epi16(__m128i __A) { return (__m128i)__builtin_shufflevector( __builtin_convertvector((__v4si)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7); @@ -7419,9 +7415,8 @@ _mm_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) __builtin_ia32_pmovdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi32_epi16 (__m256i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_cvtepi32_epi16(__m256i __A) { return (__m128i)__builtin_convertvector((__v8si)__A, __v8hi); } @@ -7446,9 +7441,8 @@ _mm256_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) __builtin_ia32_pmovdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_cvtepi64_epi8 (__m128i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_cvtepi64_epi8(__m128i __A) { return (__m128i)__builtin_shufflevector( __builtin_convertvector((__v2di)__A, __v2qi), (__v2qi){0, 0}, 0, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3); @@ -7475,9 +7469,8 @@ _mm_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) __builtin_ia32_pmovqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi64_epi8 (__m256i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_cvtepi64_epi8(__m256i __A) { return (__m128i)__builtin_shufflevector( __builtin_convertvector((__v4di)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7); @@ -7504,9 +7497,8 @@ _mm256_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) __builtin_ia32_pmovqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_cvtepi64_epi32 (__m128i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_cvtepi64_epi32(__m128i __A) { return (__m128i)__builtin_shufflevector( __builtin_convertvector((__v2di)__A, __v2si), (__v2si){0, 0}, 0, 1, 2, 3); } @@ -7532,23 +7524,20 @@ _mm_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A) __builtin_ia32_pmovqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi64_epi32 (__m256i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_cvtepi64_epi32(__m256i __A) { return (__m128i)__builtin_convertvector((__v4di)__A, __v4si); } -static __inline__ __m128i __DEFAULT_FN_ATTRS256 -_mm256_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, (__v4si)_mm256_cvtepi64_epi32(__A), (__v4si)__O); } -static __inline__ __m128i __DEFAULT_FN_ATTRS256 -_mm256_maskz_cvtepi64_epi32 (__mmask8 __M, __m256i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, (__v4si)_mm256_cvtepi64_epi32(__A), (__v4si)_mm_setzero_si128()); @@ -7560,9 +7549,8 @@ _mm256_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A) __builtin_ia32_pmovqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 
-_mm_cvtepi64_epi16 (__m128i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_cvtepi64_epi16(__m128i __A) { return (__m128i)__builtin_shufflevector( __builtin_convertvector((__v2di)__A, __v2hi), (__v2hi){0, 0}, 0, 1, 2, 3, 3, 3, 3, 3); @@ -7590,9 +7578,8 @@ _mm_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) __builtin_ia32_pmovqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi64_epi16 (__m256i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_cvtepi64_epi16(__m256i __A) { return (__m128i)__builtin_shufflevector( __builtin_convertvector((__v4di)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7); diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp index 11d2d5c..999e302c 100644 --- a/clang/lib/Sema/SemaConcept.cpp +++ b/clang/lib/Sema/SemaConcept.cpp @@ -1049,6 +1049,7 @@ ExprResult ConstraintSatisfactionChecker::Evaluate( case NormalizedConstraint::ConstraintKind::Compound: return Evaluate(static_cast<const CompoundConstraint &>(Constraint), MLTAL); } + llvm_unreachable("Unknown ConstraintKind enum"); } static bool CheckConstraintSatisfaction( @@ -2141,6 +2142,7 @@ bool SubstituteParameterMappings::substitute(NormalizedConstraint &N) { return substitute(Compound.getRHS()); } } + llvm_unreachable("Unknown ConstraintKind enum"); } } // namespace @@ -2561,7 +2563,6 @@ FormulaType SubsumptionChecker::Normalize(const NormalizedConstraint &NC) { }; switch (NC.getKind()) { - case NormalizedConstraint::ConstraintKind::Atomic: return {{find(&static_cast<const AtomicConstraint &>(NC))}}; @@ -2601,6 +2602,7 @@ FormulaType SubsumptionChecker::Normalize(const NormalizedConstraint &NC) { return Res; } } + llvm_unreachable("Unknown ConstraintKind enum"); } void SubsumptionChecker::AddUniqueClauseToFormula(Formula &F, Clause C) { diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp index 0069b08..6eaf7b9 100644 --- a/clang/lib/Sema/SemaDecl.cpp +++ b/clang/lib/Sema/SemaDecl.cpp @@ -11041,17 +11041,6 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, << CUDA().getConfigureFuncName(); Context.setcudaConfigureCallDecl(NewFD); } - - // Variadic functions, other than a *declaration* of printf, are not allowed - // in device-side CUDA code, unless someone passed - // -fcuda-allow-variadic-functions. 
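
The SemaDecl hunk here removes both this comment and the check it documents; Sema no longer rejects variadic device-side functions at this point. For orientation, a hypothetical example of what the old err_variadic_device_fn diagnostic fired on in CUDA mode without -fcuda-allow-variadic-functions (not code from the patch):

    __device__ int sum_ints(int n, ...);       // previously diagnosed here
    extern "C" int printf(const char *, ...);  // a bare declaration of printf was exempt
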
- if (!getLangOpts().CUDAAllowVariadicFunctions && NewFD->isVariadic() && - (NewFD->hasAttr<CUDADeviceAttr>() || - NewFD->hasAttr<CUDAGlobalAttr>()) && - !(II && II->isStr("printf") && NewFD->isExternC() && - !D.isFunctionDefinition())) { - Diag(NewFD->getLocation(), diag::err_variadic_device_fn); - } } MarkUnusedFileScopedDecl(NewFD); diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp index fa30c66b..2b375b9 100644 --- a/clang/lib/Sema/SemaHLSL.cpp +++ b/clang/lib/Sema/SemaHLSL.cpp @@ -3571,9 +3571,6 @@ bool SemaHLSL::CanPerformAggregateSplatCast(Expr *Src, QualType DestTy) { if (SrcVecTy) SrcTy = SrcVecTy->getElementType(); - if (ContainsBitField(DestTy)) - return false; - llvm::SmallVector<QualType> DestTypes; BuildFlattenedTypeList(DestTy, DestTypes); @@ -3600,9 +3597,6 @@ bool SemaHLSL::CanPerformElementwiseCast(Expr *Src, QualType DestTy) { (DestTy->isScalarType() || DestTy->isVectorType())) return false; - if (ContainsBitField(DestTy) || ContainsBitField(SrcTy)) - return false; - llvm::SmallVector<QualType> DestTypes; BuildFlattenedTypeList(DestTy, DestTypes); llvm::SmallVector<QualType> SrcTypes; diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp index 7ad7049..8471f02 100644 --- a/clang/lib/Sema/SemaOpenACC.cpp +++ b/clang/lib/Sema/SemaOpenACC.cpp @@ -2724,16 +2724,6 @@ Expr *GenerateReductionInitRecipeExpr(ASTContext &Context, return InitExpr; } -const Expr *StripOffBounds(const Expr *VarExpr) { - while (isa_and_present<ArraySectionExpr, ArraySubscriptExpr>(VarExpr)) { - if (const auto *AS = dyn_cast<ArraySectionExpr>(VarExpr)) - VarExpr = AS->getBase()->IgnoreParenImpCasts(); - else if (const auto *Sub = dyn_cast<ArraySubscriptExpr>(VarExpr)) - VarExpr = Sub->getBase()->IgnoreParenImpCasts(); - } - return VarExpr; -} - VarDecl *CreateAllocaDecl(ASTContext &Ctx, DeclContext *DC, SourceLocation BeginLoc, IdentifierInfo *VarName, QualType VarTy) { @@ -2794,17 +2784,18 @@ OpenACCPrivateRecipe SemaOpenACC::CreatePrivateInitRecipe(const Expr *VarExpr) { OpenACCFirstPrivateRecipe SemaOpenACC::CreateFirstPrivateInitRecipe(const Expr *VarExpr) { - // TODO: OpenACC: This shouldn't be necessary, see PrivateInitRecipe - VarExpr = StripOffBounds(VarExpr); - + // We don't strip bounds here, so that we are doing our recipe init at the + // 'lowest' possible level. Codegen is going to have to do its own 'looping'. if (!VarExpr || VarExpr->getType()->isDependentType()) return OpenACCFirstPrivateRecipe::Empty(); QualType VarTy = VarExpr->getType().getNonReferenceType().getUnqualifiedType(); - // TODO: OpenACC: for arrays/bounds versions, we're going to have to do a - // different initializer, but for now we can go ahead with this. + // Array sections are special, and we have to treat them that way. 
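
Since CreateFirstPrivateInitRecipe no longer strips bounds, a firstprivate of an array section keeps its ArraySectionExpr: the recipe's alloca uses the section's base array type (below), and, as the test updates later in this patch show, the copy region becomes a bounds-driven loop instead of one unrolled copy per element. A hypothetical OpenACC source that exercises this path; names are illustrative only:

    void bump(int arr[10]) {
    #pragma acc parallel loop firstprivate(arr[2:5])
      for (int i = 2; i < 7; ++i)
        arr[i] += 1;   // the section arr[2:5] covers elements 2..6
    }
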
+ if (const auto *ASE = + dyn_cast<ArraySectionExpr>(VarExpr->IgnoreParenImpCasts())) + VarTy = ArraySectionExpr::getBaseOriginalType(ASE); VarDecl *AllocaDecl = CreateAllocaDecl( getASTContext(), SemaRef.getCurContext(), VarExpr->getBeginLoc(), diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp index b870114..5657dfe 100644 --- a/clang/lib/Sema/SemaOverload.cpp +++ b/clang/lib/Sema/SemaOverload.cpp @@ -4413,14 +4413,23 @@ CompareImplicitConversionSequences(Sema &S, SourceLocation Loc, Result = CompareStandardConversionSequences(S, Loc, ICS1.Standard, ICS2.Standard); else if (ICS1.isUserDefined()) { + // With lazy template loading, it is possible to find non-canonical + // FunctionDecls, depending on when redecl chains are completed. Make sure + // to compare the canonical decls of conversion functions. This avoids + // ambiguity problems for templated conversion operators. + const FunctionDecl *ConvFunc1 = ICS1.UserDefined.ConversionFunction; + if (ConvFunc1) + ConvFunc1 = ConvFunc1->getCanonicalDecl(); + const FunctionDecl *ConvFunc2 = ICS2.UserDefined.ConversionFunction; + if (ConvFunc2) + ConvFunc2 = ConvFunc2->getCanonicalDecl(); // User-defined conversion sequence U1 is a better conversion // sequence than another user-defined conversion sequence U2 if // they contain the same user-defined conversion function or // constructor and if the second standard conversion sequence of // U1 is better than the second standard conversion sequence of // U2 (C++ 13.3.3.2p3). - if (ICS1.UserDefined.ConversionFunction == - ICS2.UserDefined.ConversionFunction) + if (ConvFunc1 == ConvFunc2) Result = CompareStandardConversionSequences(S, Loc, ICS1.UserDefined.After, ICS2.UserDefined.After); diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp index e1f4d0d..b0096d8 100644 --- a/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp +++ b/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp @@ -509,6 +509,8 @@ bool initializeScanCompilerInstance( ScanInstance.getFrontendOpts().DisableFree = false; ScanInstance.getFrontendOpts().GenerateGlobalModuleIndex = false; ScanInstance.getFrontendOpts().UseGlobalModuleIndex = false; + ScanInstance.getFrontendOpts().GenReducedBMI = false; + ScanInstance.getFrontendOpts().ModuleOutputPath.clear(); // This will prevent us compiling individual modules asynchronously since // FileManager is not thread-safe, but it does improve performance for now. ScanInstance.getFrontendOpts().ModulesShareFileManager = true; diff --git a/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp b/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp index d67178c..a117bec 100644 --- a/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp +++ b/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp @@ -263,6 +263,10 @@ makeCommonInvocationForModuleBuild(CompilerInvocation CI) { // units. 
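
Both dependency-scanning hunks (initializeScanCompilerInstance above and makeCommonInvocationForModuleBuild here) apply the same reset: reduced-BMI generation and its output path are per-translation-unit output settings, and clearing them keeps invocations that are reused for module builds from inheriting them. A sketch of the shared pattern; the helper name is hypothetical:

    static void clearPerTUOutputs(clang::FrontendOptions &FE) {
      FE.GenReducedBMI = false;     // reduced BMI is a per-TU output option
      FE.ModuleOutputPath.clear();  // ...and so is the path it is written to
    }
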
CI.getFrontendOpts().Inputs.clear(); CI.getFrontendOpts().OutputFile.clear(); + CI.getFrontendOpts().GenReducedBMI = false; + CI.getFrontendOpts().ModuleOutputPath.clear(); + CI.getHeaderSearchOpts().ModulesSkipHeaderSearchPaths = false; + CI.getHeaderSearchOpts().ModulesSkipDiagnosticOptions = false; // LLVM options are not going to affect the AST CI.getFrontendOpts().LLVMArgs.clear(); diff --git a/clang/test/AST/ByteCode/cxx20.cpp b/clang/test/AST/ByteCode/cxx20.cpp index 67bf9a7..1888998 100644 --- a/clang/test/AST/ByteCode/cxx20.cpp +++ b/clang/test/AST/ByteCode/cxx20.cpp @@ -1070,9 +1070,30 @@ namespace Virtual { public: int a = f(); - virtual constexpr int f() { return 10; } + virtual constexpr int f() const { return 10; } }; + K k; + static_assert(k.f() == 10); // both-error {{not an integral constant expression}} \ + // both-note {{virtual function called on object 'k' whose dynamic type is not constant}} + + void f() { + constexpr K k; + static_assert(k.f() == 10); + } + + void f2() { + K k; + static_assert(k.f() == 10); // both-error {{not an integral constant expression}} \ + // both-note {{virtual function called on object 'k' whose dynamic type is not constant}} + } + + static_assert(K().f() == 10); + + void f3() { + static_assert(K().f() == 10); + } + class L : public K { public: int b = f(); @@ -1083,6 +1104,42 @@ namespace Virtual { static_assert(l.a == 10); static_assert(l.b == 10); static_assert(l.c == 10); + static_assert(l.f() == 10); + + struct M { + K& mk = k; + }; + static_assert(M{}.mk.f() == 10); // both-error {{not an integral constant expression}} \ + // both-note {{virtual function called on object 'k' whose dynamic type is not constant}} + + struct N { + K* mk = &k; + }; + static_assert(N{}.mk->f() == 10); // both-error {{not an integral constant expression}} \ + // both-note {{virtual function called on object 'k' whose dynamic type is not constant}} + + extern K o; + static_assert(o.f() == 10); // both-error {{not an integral constant expression}} \ + // both-note {{virtual function called on object 'o' whose dynamic type is not constant}} + static K p; + static_assert(p.f() == 10); // both-error {{not an integral constant expression}} \ + // both-note {{virtual function called on object 'p' whose dynamic type is not constant}} + + void f4() { + static K p; + static_assert(p.f() == 10); // both-error {{not an integral constant expression}} \ + // both-note {{virtual function called on object 'p' whose dynamic type is not constant}} + } + + const K q; + static_assert(q.f() == 10); // both-error {{not an integral constant expression}} \ + // both-note {{virtual function called on object 'q' whose dynamic type is not constant}} + + void f5() { + const K q; + static_assert(q.f() == 10); // both-error {{not an integral constant expression}} \ + // both-note {{virtual function called on object 'q' whose dynamic type is not constant}} + } } namespace DiscardedTrivialCXXConstructExpr { @@ -1100,3 +1157,29 @@ namespace DiscardedTrivialCXXConstructExpr { constexpr int y = foo(12); // both-error {{must be initialized by a constant expression}} \ // both-note {{in call to}} } + +namespace VirtualFunctionCallThroughArrayElem { + struct X { + constexpr virtual int foo() const { + return 3; + } + }; + constexpr X xs[5]; + static_assert(xs[3].foo() == 3); + + constexpr X xs2[1][2]; + static_assert(xs2[0].foo() == 3); // both-error {{is not a structure or union}} + static_assert(xs2[0][0].foo() == 3); + + struct Y: public X { + constexpr int foo() const override { + return 1; 
+ } + }; + constexpr Y ys[20]; + static_assert(ys[12].foo() == static_cast<const X&>(ys[12]).foo()); + + X a[3][4]; + static_assert(a[2][3].foo()); // both-error {{not an integral constant expression}} \ + // both-note {{virtual function called on object 'a[2][3]' whose dynamic type is not constant}} +} diff --git a/clang/test/CIR/CodeGen/global-init.cpp b/clang/test/CIR/CodeGen/global-init.cpp index 102affc..0c19e68 100644 --- a/clang/test/CIR/CodeGen/global-init.cpp +++ b/clang/test/CIR/CodeGen/global-init.cpp @@ -1,8 +1,9 @@ -// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2> %t-before.cir +// RUN: FileCheck --input-file=%t-before.cir %s --check-prefix=CIR-BEFORE-LPP // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR -// Note: The CIR generated from this test isn't ready for lowering to LLVM yet. -// That will require changes to LoweringPrepare. +// Note: The LoweringPrepare work isn't yet complete. We still need to create +// the global ctor list attribute. struct NeedsCtor { NeedsCtor(); @@ -10,8 +11,16 @@ struct NeedsCtor { NeedsCtor needsCtor; -// CIR: cir.func private @_ZN9NeedsCtorC1Ev(!cir.ptr<!rec_NeedsCtor>) -// CIR: cir.global external @needsCtor = ctor : !rec_NeedsCtor { -// CIR: %[[THIS:.*]] = cir.get_global @needsCtor : !cir.ptr<!rec_NeedsCtor> -// CIR: cir.call @_ZN9NeedsCtorC1Ev(%[[THIS]]) : (!cir.ptr<!rec_NeedsCtor>) -> () +// CIR-BEFORE-LPP: cir.global external @needsCtor = ctor : !rec_NeedsCtor { +// CIR-BEFORE-LPP: %[[THIS:.*]] = cir.get_global @needsCtor : !cir.ptr<!rec_NeedsCtor> +// CIR-BEFORE-LPP: cir.call @_ZN9NeedsCtorC1Ev(%[[THIS]]) : (!cir.ptr<!rec_NeedsCtor>) -> () + +// CIR: cir.global external @needsCtor = #cir.zero : !rec_NeedsCtor +// CIR: cir.func internal private @__cxx_global_var_init() { +// CIR: %0 = cir.get_global @needsCtor : !cir.ptr<!rec_NeedsCtor> +// CIR: cir.call @_ZN9NeedsCtorC1Ev(%0) : (!cir.ptr<!rec_NeedsCtor>) -> () + +// CIR: cir.func private @_GLOBAL__sub_I_[[FILENAME:.*]]() { +// CIR: cir.call @__cxx_global_var_init() : () -> () +// CIR: cir.return // CIR: } diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index 91dae3f..3dcf7af 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -158,13 +158,13 @@ void test_new_with_complex_type() { } // CHECK: cir.func{{.*}} @_Z26test_new_with_complex_typev -// CHECK: %0 = cir.alloca !cir.ptr<!cir.complex<!cir.float>>, !cir.ptr<!cir.ptr<!cir.complex<!cir.float>>>, ["a", init] -// CHECK: %1 = cir.const #cir.int<8> : !u64i -// CHECK: %2 = cir.call @_Znwm(%1) : (!u64i) -> !cir.ptr<!void> -// CHECK: %3 = cir.cast bitcast %2 : !cir.ptr<!void> -> !cir.ptr<!cir.complex<!cir.float>> -// CHECK: %4 = cir.const #cir.const_complex<#cir.fp<1.000000e+00> : !cir.float, #cir.fp<2.000000e+00> : !cir.float> : !cir.complex<!cir.float> -// CHECK: cir.store align(8) %4, %3 : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>> -// CHECK: cir.store align(8) %3, %0 : !cir.ptr<!cir.complex<!cir.float>>, !cir.ptr<!cir.ptr<!cir.complex<!cir.float>>> +// CHECK: %[[A_ADDR:.*]] = cir.alloca !cir.ptr<!cir.complex<!cir.float>>, !cir.ptr<!cir.ptr<!cir.complex<!cir.float>>>, ["a", init] +// CHECK: %[[COMPLEX_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK: %[[NEW_COMPLEX:.*]] = cir.call @_Znwm(%[[COMPLEX_SIZE]]) : (!u64i) -> !cir.ptr<!void> +// CHECK: 
%[[COMPLEX_PTR:.*]] = cir.cast bitcast %[[NEW_COMPLEX]] : !cir.ptr<!void> -> !cir.ptr<!cir.complex<!cir.float>> +// CHECK: %[[COMPLEX_VAL:.*]] = cir.const #cir.const_complex<#cir.fp<1.000000e+00> : !cir.float, #cir.fp<2.000000e+00> : !cir.float> : !cir.complex<!cir.float> +// CHECK: cir.store{{.*}} %[[COMPLEX_VAL]], %[[COMPLEX_PTR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>> +// CHECK: cir.store{{.*}} %[[COMPLEX_PTR]], %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.ptr<!cir.ptr<!cir.complex<!cir.float>>> // LLVM: define{{.*}} void @_Z26test_new_with_complex_typev // LLVM: %[[A_ADDR:.*]] = alloca ptr, i64 1, align 8 diff --git a/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp b/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp index e836a37a..726cd3e 100644 --- a/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp +++ b/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct NoCopyConstruct {}; @@ -81,292 +81,247 @@ struct HasDtor { // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}): +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__Bcnt1__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): // CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}): -// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!s32i>, %[[ZERO]] : !u64i), !cir.ptr<!s32i> -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !s32i, !cir.ptr<!s32i> -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i> -// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[ONE_2]] : !u64i), !cir.ptr<!s32i> -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i> -// -// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : 
!cir.ptr<!s32i>, %[[TWO]] : !s64i), !cir.ptr<!s32i> -// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[TWO_2]] : !u64i), !cir.ptr<!s32i> -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i> -// -// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[THREE]] : !s64i), !cir.ptr<!s32i> -// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[THREE_2]] : !u64i), !cir.ptr<!s32i> -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i> -// -// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[FOUR]] : !s64i), !cir.ptr<!s32i> -// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[FOUR_2]] : !u64i), !cir.ptr<!s32i> -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i> +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[STRIDE_FROM:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!s32i> +// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[STRIDE_TO:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!s32i>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!s32i> +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load{{.*}} 
%[[STRIDE_FROM]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: cir.store{{.*}} %[[FROM_LOAD]], %[[STRIDE_TO]] : !s32i, !cir.ptr<!s32i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}): +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__Bcnt1__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): // CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}): -// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!cir.float>, %[[ZERO]] : !u64i), !cir.ptr<!cir.float> -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !cir.float, !cir.ptr<!cir.float> -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float> -// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[ONE_2]] : !u64i), !cir.ptr<!cir.float> -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float> -// -// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[TWO]] : !s64i), !cir.ptr<!cir.float> -// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[TWO_2]] : !u64i), !cir.ptr<!cir.float> -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float> -// -// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[THREE]] : !s64i), !cir.ptr<!cir.float> 
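
The deleted CHECK-NEXT lines around this point pinned down five unrolled per-element copies (constants 0 through 4); the replacement blocks instead match one cir.for whose bounds come from acc.get_lowerbound and acc.get_upperbound on the recipe's bounds argument. Rendered as plain C++ for orientation (a sketch of the loop shape, not the emitted CIR):

    void copy_section(const float *from, float *to,
                      unsigned long lb, unsigned long ub) {
      for (unsigned long i = lb; i < ub; ++i)  // cir.for: cond, body, step
        to[i] = from[i];                       // stride-indexed load/store
    }
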
-// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[THREE_2]] : !u64i), !cir.ptr<!cir.float> -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float> -// -// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[FOUR]] : !s64i), !cir.ptr<!cir.float> -// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[FOUR_2]] : !u64i), !cir.ptr<!cir.float> -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float> +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[STRIDE_FROM:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.float> +// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[STRIDE_TO:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!cir.float>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.float> +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load{{.*}} %[[STRIDE_FROM]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[FROM_LOAD]], %[[STRIDE_TO]] : !cir.float, !cir.ptr<!cir.float> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe 
@firstprivatization__ZTSA5_15NoCopyConstruct : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}): +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__Bcnt1__ZTSA5_15NoCopyConstruct : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): // CHECK-NEXT: cir.alloca !cir.array<!rec_NoCopyConstruct x 5>, !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}): -// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct> -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ZERO]] : !u64i), !cir.ptr<!rec_NoCopyConstruct> -// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> () -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct> -// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct> -// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> () -// -// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO]] : !s64i), !cir.ptr<!rec_NoCopyConstruct> -// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct> -// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> () -// -// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct> -// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE_2]] : 
!u64i), !cir.ptr<!rec_NoCopyConstruct> -// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> () -// -// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR]] : !s64i), !cir.ptr<!rec_NoCopyConstruct> -// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct> -// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> () -// +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct> +// CHECK-NEXT: %[[STRIDE_FROM:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_NoCopyConstruct> +// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct> +// CHECK-NEXT: %[[STRIDE_TO:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_NoCopyConstruct> +// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[STRIDE_TO]], %[[STRIDE_FROM]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_13CopyConstruct : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}): +// CHECK-NEXT: acc.firstprivate.recipe 
@firstprivatization__Bcnt1__ZTSA5_13CopyConstruct : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): // CHECK-NEXT: cir.alloca !cir.array<!rec_CopyConstruct x 5>, !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}): -// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct> -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[ZERO]] : !u64i), !cir.ptr<!rec_CopyConstruct> -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> () -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[ONE]] : !s64i), !cir.ptr<!rec_CopyConstruct> -// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_CopyConstruct> -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> () -// -// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[TWO]] : !s64i), !cir.ptr<!rec_CopyConstruct> -// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_CopyConstruct> -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> () -// -// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[THREE]] : !s64i), !cir.ptr<!rec_CopyConstruct> -// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_CopyConstruct> -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> () -// -// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : 
!cir.ptr<!rec_CopyConstruct>, %[[FOUR]] : !s64i), !cir.ptr<!rec_CopyConstruct> -// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct> -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_CopyConstruct> -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> () -// +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct> +// CHECK-NEXT: %[[STRIDE_FROM:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_CopyConstruct> +// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct> +// CHECK-NEXT: %[[STRIDE_TO:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_CopyConstruct>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_CopyConstruct> +// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[STRIDE_TO]], %[[STRIDE_FROM]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_14NonDefaultCtor : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}): -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_NonDefaultCtor x 5>, !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__Bcnt1__ZTSA5_14NonDefaultCtor : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): +// CHECK-NEXT: 
cir.alloca !cir.array<!rec_NonDefaultCtor x 5>, !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>, ["openacc.firstprivate.init"]
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
-// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ZERO]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
-//
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ONE]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
-//
-// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[TWO]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
-//
-// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[THREE]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
-//
-// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[FOUR]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
-//
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[STRIDE_FROM:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[STRIDE_TO:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[STRIDE_TO]], %[[STRIDE_FROM]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
-// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_7HasDtor : !cir.ptr<!cir.array<!rec_HasDtor x 5>> init {
-// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}):
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__Bcnt1__ZTSA5_7HasDtor : !cir.ptr<!cir.array<!rec_HasDtor x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
// CHECK-NEXT: cir.alloca !cir.array<!rec_HasDtor x 5>, !cir.ptr<!cir.array<!rec_HasDtor x 5>>, ["openacc.firstprivate.init"]
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
-// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[ZERO]] : !u64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
-//
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_HasDtor>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
-//
-// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_HasDtor>, %[[TWO]] : !s64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
-//
-// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_HasDtor>, %[[THREE]] : !s64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
-//
-// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_HasDtor>, %[[FOUR]] : !s64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
-//
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[STRIDE_FROM:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[STRIDE_TO:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_HasDtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[STRIDE_TO]], %[[STRIDE_FROM]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
-//
// CHECK-NEXT: } destroy {
-// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}):
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[ELEM:.*]] = cir.ptr_stride(%[[ARRPTR]] : !cir.ptr<!rec_HasDtor>, %[[LAST_IDX]] : !u64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>, ["__array_idx"]
-// CHECK-NEXT: cir.store %[[ELEM]], %[[ITR]] : !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!cir.ptr<!rec_HasDtor>>, !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ELEM_LOAD]]) nothrow : (!cir.ptr<!rec_HasDtor>) -> ()
-// CHECK-NEXT: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i
-// CHECK-NEXT: %[[PREVELEM:.*]] = cir.ptr_stride(%[[ELEM_LOAD]] : !cir.ptr<!rec_HasDtor>, %[[NEG_ONE]] : !s64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: cir.store %[[PREVELEM]], %[[ITR]] : !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>
+// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[LAST_SUB_ONE:.*]] = cir.binop(sub, %[[UB_CAST]], %[[ONE]]) : !u64i
+// CHECK-NEXT: cir.store %[[LAST_SUB_ONE]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR_LOAD]], %[[LB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasDtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_HasDtor>) -> ()
// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!cir.ptr<!rec_HasDtor>>, !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[ELEM_LOAD]], %[[ARRPTR]]) : !cir.ptr<!rec_HasDtor>, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
+//

extern "C" void acc_combined() {
// CHECK: cir.func{{.*}} @acc_combined() {
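The destroy recipe above replaces the old pointer-walking cir.do loop with an index-based cir.for that starts at upperbound minus one and decrements while the iterator is still at or above the lowerbound. A minimal C++ sketch of that destruction order, assuming lb and ub are the values produced by acc.get_lowerbound and acc.get_upperbound (the helper name is illustrative, not from the tests):

struct HasDtor { ~HasDtor(); };

// Destroy base[lb..ub) in reverse, mirroring the cir.for that seeds the
// iterator with ub - 1 and decrements while itr >= lb.
void destroy_range(HasDtor *base, unsigned long lb, unsigned long ub) {
  for (unsigned long itr = ub - 1; ; --itr) {
    base[itr].~HasDtor();   // cir.call @_ZN7HasDtorD1Ev per element
    if (itr == lb)          // stop at the lowerbound element; this also
      break;                // avoids unsigned wrap-around when lb == 0
  }
}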
@@ -482,7 +437,7 @@ extern "C" void acc_combined() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1]"}
- // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>) {
+ // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__Bcnt1__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>) {
// CHECK-NEXT: acc.loop combined(serial)
// CHECK: acc.yield
// CHECK-NEXT: } loc
@@ -497,7 +452,7 @@ extern "C" void acc_combined() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1]"}
- // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>) {
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__Bcnt1__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>) {
// CHECK-NEXT: acc.loop combined(parallel)
// CHECK: acc.yield
// CHECK-NEXT: } loc
@@ -512,7 +467,7 @@ extern "C" void acc_combined() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1]"}
- // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) {
+ // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) {
// CHECK-NEXT: acc.loop combined(serial)
// CHECK: acc.yield
// CHECK-NEXT: } loc
@@ -527,7 +482,7 @@ extern "C" void acc_combined() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[HASCOPYARR]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {name = "hasCopyArr[1]"}
- // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) {
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) {
// CHECK-NEXT: acc.loop combined(parallel)
// CHECK: acc.yield
// CHECK-NEXT: } loc
@@ -542,7 +497,7 @@ extern "C" void acc_combined() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOTDEFCTORARR]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {name = "notDefCtorArr[1]"}
- // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) {
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) {
// CHECK-NEXT: acc.loop combined(parallel)
// CHECK: acc.yield
// CHECK-NEXT: } loc
@@ -557,7 +512,7 @@ extern "C" void acc_combined() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[DTORARR]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_HasDtor x 5>> {name = "dtorArr[1]"}
- // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) {
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) {
// CHECK-NEXT: acc.loop combined(parallel)
// CHECK: acc.yield
// CHECK-NEXT: } loc
@@ -607,12 +562,12 @@ extern "C" void acc_combined() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE6:.*]] = acc.firstprivate varPtr(%[[DTORARR]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_HasDtor x 5>> {name = "dtorArr[1]"}
- // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
+ // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__Bcnt1__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
// CHECK-NEXT: acc.loop combined(serial)
// CHECK: acc.yield
// CHECK-NEXT: } loc
@@ -629,7 +584,7 @@ extern "C" void acc_combined() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1:1]"}
- // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>) {
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__Bcnt1__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>) {
// CHECK-NEXT: acc.loop combined(parallel)
// CHECK: acc.yield
// CHECK-NEXT: } loc
@@ -645,7 +600,7 @@ extern "C" void acc_combined() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1:1]"}
- // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>) {
+ // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__Bcnt1__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>) {
// CHECK-NEXT: acc.loop combined(serial)
// CHECK: acc.yield
// CHECK-NEXT: } loc
@@ -661,7 +616,7 @@ extern "C" void acc_combined() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1:1]"}
- // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) {
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) {
// CHECK-NEXT: acc.loop combined(parallel)
// CHECK: acc.yield
// CHECK-NEXT: } loc
@@ -677,7 +632,7 @@ extern "C" void acc_combined() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[HASCOPYARR]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {name = "hasCopyArr[1:1]"}
- // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) {
+ // CHECK-NEXT: acc.serial combined(loop) firstprivate(@firstprivatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) {
// CHECK-NEXT: acc.loop combined(serial)
// CHECK: acc.yield
// CHECK-NEXT: } loc
@@ -693,7 +648,7 @@ extern "C" void acc_combined() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOTDEFCTORARR]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {name = "notDefCtorArr[1:1]"}
- // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) {
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) {
// CHECK-NEXT: acc.loop combined(parallel)
// CHECK: acc.yield
// CHECK-NEXT: } loc
@@ -709,7 +664,7 @@ extern "C" void acc_combined() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[DTORARR]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_HasDtor x 5>> {name = "dtorArr[1:1]"}
- // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) {
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) {
// CHECK-NEXT: acc.loop combined(parallel)
// CHECK: acc.yield
// CHECK-NEXT: } loc
@@ -765,12 +720,12 @@ extern "C" void acc_combined() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE6:.*]] = acc.firstprivate varPtr(%[[DTORARR]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_HasDtor x 5>> {name = "dtorArr[1:1]"}
- // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
+ // CHECK-NEXT: acc.parallel combined(loop) firstprivate(@firstprivatization__Bcnt1__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
// CHECK-NEXT: acc.loop combined(parallel)
// CHECK: acc.yield
// CHECK-NEXT: } loc
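The compute tests below exercise the same bounds-driven copy pattern the combined test checks above: instead of unrolling five fixed-index copies, the recipe strides both arrays with a single iterator over [lb, ub), copy-constructing each element (trivially copyable elements use a plain load/store or cir.copy in the loop body instead). A rough C++ analogue, assuming lb and ub come from acc.get_lowerbound and acc.get_upperbound; the placement-new helper is illustrative only:

#include <new>

struct NonDefaultCtor { NonDefaultCtor(const NonDefaultCtor &); };

// Copy-construct to[itr] from from[itr] for itr in [lb, ub), matching the
// cir.for body that calls @_ZN14NonDefaultCtorC1ERKS_ once per element.
void copy_range(const NonDefaultCtor *from, NonDefaultCtor *to,
                unsigned long lb, unsigned long ub) {
  for (unsigned long itr = lb; itr < ub; ++itr)
    ::new (static_cast<void *>(to + itr)) NonDefaultCtor(from[itr]);
}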
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c
index de6e7b0..94c2973 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c
@@ -1,4 +1,4 @@
-// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s

struct NoCopyConstruct {};
@@ -34,140 +34,110 @@ struct NoCopyConstruct {};
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
-// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> init {
-// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}):
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__Bcnt1__ZTSA5_i : !cir.ptr<!cir.array<!s32i x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
// CHECK-NEXT: cir.alloca !cir.array<!s32i x 5>, !cir.ptr<!cir.array<!s32i x 5>>, ["openacc.firstprivate.init"]
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
-// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!s32i>, %[[ZERO]] : !u64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !s32i, !cir.ptr<!s32i>
-//
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[ONE_2]] : !u64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
-//
-// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[TWO]] : !s64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[TWO_2]] : !u64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
-//
-// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[THREE]] : !s64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[THREE_2]] : !u64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
-//
-// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[FOUR]] : !s64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[FOUR_2]] : !u64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[STRIDE_FROM:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[STRIDE_TO:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!s32i>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load{{.*}} %[[STRIDE_FROM]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[FROM_LOAD]], %[[STRIDE_TO]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
-// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> init {
-// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}):
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__Bcnt1__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.firstprivate.init"]
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
-// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!cir.float>, %[[ZERO]] : !u64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !cir.float, !cir.ptr<!cir.float>
-//
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[ONE_2]] : !u64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
-//
-// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[TWO]] : !s64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[TWO_2]] : !u64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
-//
-// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[THREE]] : !s64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[THREE_2]] : !u64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
-//
-// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[FOUR]] : !s64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[FOUR_2]] : !u64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[STRIDE_FROM:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[STRIDE_TO:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!cir.float>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load{{.*}} %[[STRIDE_FROM]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[FROM_LOAD]], %[[STRIDE_TO]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
-// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_15NoCopyConstruct : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> init {
-// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}):
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__Bcnt1__ZTSA5_15NoCopyConstruct : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
// CHECK-NEXT: cir.alloca !cir.array<!rec_NoCopyConstruct x 5>, !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>, ["openacc.firstprivate.init"]
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
-// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ZERO]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: cir.copy %[[FROM_OFFSET:.*]] to %[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>
-//
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: cir.copy %[[FROM_OFFSET]] to %[[TO_OFFSET]] : !cir.ptr<!rec_NoCopyConstruct>
-//
-// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: cir.copy %[[FROM_OFFSET]] to %[[TO_OFFSET]] : !cir.ptr<!rec_NoCopyConstruct>
-//
-// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: cir.copy %[[FROM_OFFSET]] to %[[TO_OFFSET]] : !cir.ptr<!rec_NoCopyConstruct>
-//
-// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: cir.copy %[[FROM_OFFSET]] to %[[TO_OFFSET]] : !cir.ptr<!rec_NoCopyConstruct>
-//
+// CHECK-NEXT: %[[STRIDE_FROM:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[STRIDE_TO:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: cir.copy %[[STRIDE_FROM]] to %[[STRIDE_TO]] : !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
@@ -227,7 +197,7 @@ void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1]"}
- // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>)
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__Bcnt1__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc parallel firstprivate(someFloatArr[1])
@@ -239,7 +209,7 @@ void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1]"}
- // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>)
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__Bcnt1__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc serial firstprivate(noCopyArr[1])
@@ -251,7 +221,7 @@ void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1]"}
- // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc serial firstprivate(someIntArr[1], someFloatArr[1], noCopyArr[1])
@@ -277,9 +247,9 @@ void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE3:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1]"}
- // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__Bcnt1__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
@@ -293,7 +263,7 @@ void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1:1]"}
- // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>)
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__Bcnt1__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc serial firstprivate(someFloatArr[1:1])
@@ -306,7 +276,7 @@ void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1:1]"}
- // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>)
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__Bcnt1__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc parallel firstprivate(noCopyArr[1:1])
@@ -319,7 +289,7 @@ void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1:1]"}
- // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc parallel firstprivate(someIntArr[1:1], someFloatArr[1:1], noCopyArr[1:1])
@@ -348,9 +318,9 @@ void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE3:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1:1]"}
- // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__Bcnt1__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
}
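For reference, the bounds that drive these recipes come straight from the subscripts in the source pragmas: in the tests above, both someIntArr[1] and someIntArr[1:1] lower to an acc.bounds with lowerbound(1) and a one-element extent, so the recipe's copy loop runs for itr = 1 only. A sketch of the C shape these clauses take, using the array names from the tests (the function name is illustrative):

void acc_compute_sketch() {
  int someIntArr[5];
  float someFloatArr[5];
  // someIntArr[1] names a single element; someFloatArr[1:1] is a subarray
  // of length 1 starting at index 1. Both describe the same one-element range.
#pragma acc parallel firstprivate(someIntArr[1], someFloatArr[1:1])
  {
    someIntArr[1] += 1; // operates on the firstprivate copy
  }
}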
CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !s32i, !cir.ptr<!s32i>
-//
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[ONE]] : !s64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[ONE_2]] : !u64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
-//
-// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[TWO]] : !s64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[TWO_2]] : !u64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
-//
-// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[THREE]] : !s64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[THREE_2]] : !u64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
-//
-// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!s32i>, %[[FOUR]] : !s64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[FOUR_2]] : !u64i), !cir.ptr<!s32i>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!s32i>, !s32i
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[STRIDE_FROM:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!s32i>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[STRIDE_TO:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!s32i>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!s32i>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load{{.*}} %[[STRIDE_FROM]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[FROM_LOAD]], %[[STRIDE_TO]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
-// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> init {
-// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}):
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__Bcnt1__ZTSA5_f : !cir.ptr<!cir.array<!cir.float x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
// CHECK-NEXT: cir.alloca !cir.array<!cir.float x 5>, !cir.ptr<!cir.array<!cir.float x 5>>, ["openacc.firstprivate.init"]
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
-// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!cir.float>, %[[ZERO]] : !u64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !cir.float, !cir.ptr<!cir.float>
-//
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[ONE]] : !s64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[ONE_2]] : !u64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
-//
-// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[TWO]] : !s64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[TWO_2]] : !u64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
-//
-// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[THREE]] : !s64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[THREE_2]] : !u64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
-//
-// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!cir.float>, %[[FOUR]] : !s64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[FOUR_2]] : !u64i), !cir.ptr<!cir.float>
-// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr<!cir.float>, !cir.float
-// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[STRIDE_FROM:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!cir.float>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[STRIDE_TO:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!cir.float>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load{{.*}} %[[STRIDE_FROM]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[FROM_LOAD]], %[[STRIDE_TO]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
-// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_15NoCopyConstruct : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> init {
-// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}):
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__Bcnt1__ZTSA5_15NoCopyConstruct : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
// CHECK-NEXT: cir.alloca !cir.array<!rec_NoCopyConstruct x 5>, !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>, ["openacc.firstprivate.init"]
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
-// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ZERO]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
-//
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
-//
-// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
-//
-// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
-//
-// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR]] : !s64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
-// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
-//
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[STRIDE_FROM:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> -> !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: %[[STRIDE_TO:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NoCopyConstruct>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_NoCopyConstruct>
+// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[STRIDE_TO]], %[[STRIDE_FROM]]) nothrow : (!cir.ptr<!rec_NoCopyConstruct>, !cir.ptr<!rec_NoCopyConstruct>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
-// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_13CopyConstruct : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> init {
-// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}):
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__Bcnt1__ZTSA5_13CopyConstruct : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
// CHECK-NEXT: cir.alloca !cir.array<!rec_CopyConstruct x 5>, !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>, ["openacc.firstprivate.init"]
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
-// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[ZERO]] : !u64i), !cir.ptr<!rec_CopyConstruct>
-// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
-//
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[ONE]] : !s64i), !cir.ptr<!rec_CopyConstruct>
-// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
-// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
-//
-// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[TWO]] : !s64i), !cir.ptr<!rec_CopyConstruct>
-// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
-// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
-//
-// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[THREE]] : !s64i), !cir.ptr<!rec_CopyConstruct>
-// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
-// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
-//
-// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_CopyConstruct>, %[[FOUR]] : !s64i), !cir.ptr<!rec_CopyConstruct>
-// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_CopyConstruct>
-// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
-//
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[STRIDE_FROM:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_CopyConstruct>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> -> !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: %[[STRIDE_TO:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_CopyConstruct>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_CopyConstruct>
+// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[STRIDE_TO]], %[[STRIDE_FROM]]) : (!cir.ptr<!rec_CopyConstruct>, !cir.ptr<!rec_CopyConstruct>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
-// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_14NonDefaultCtor : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> init {
-// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}):
-// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_NonDefaultCtor x 5>, !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>, ["openacc.firstprivate.init"]
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__Bcnt1__ZTSA5_14NonDefaultCtor : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
+// CHECK-NEXT: cir.alloca !cir.array<!rec_NonDefaultCtor x 5>, !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>, ["openacc.firstprivate.init"]
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
-// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ZERO]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
-//
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ONE]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
-//
-// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[TWO]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
-//
-// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[THREE]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
-//
-// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_NonDefaultCtor>, %[[FOUR]] : !s64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
-// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
-//
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[STRIDE_FROM:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> -> !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: %[[STRIDE_TO:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_NonDefaultCtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_NonDefaultCtor>
+// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[STRIDE_TO]], %[[STRIDE_FROM]]) nothrow : (!cir.ptr<!rec_NonDefaultCtor>, !cir.ptr<!rec_NonDefaultCtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
//
-// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_7HasDtor : !cir.ptr<!cir.array<!rec_HasDtor x 5>> init {
-// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}):
+// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__Bcnt1__ZTSA5_7HasDtor : !cir.ptr<!cir.array<!rec_HasDtor x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
// CHECK-NEXT: cir.alloca !cir.array<!rec_HasDtor x 5>, !cir.ptr<!cir.array<!rec_HasDtor x 5>>, ["openacc.firstprivate.init"]
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } copy {
-// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}):
-// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[ZERO]] : !u64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
-//
-// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_HasDtor>, %[[ONE]] : !s64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[ONE_2]] : !u64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
-//
-// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_HasDtor>, %[[TWO]] : !s64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[TWO_2]] : !u64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
-//
-// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_HasDtor>, %[[THREE]] : !s64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[THREE_2]] : !u64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
-//
-// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr<!rec_HasDtor>, %[[FOUR]] : !s64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4>
-// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[FOUR_2]] : !u64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
-//
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[STRIDE_FROM:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr<!rec_HasDtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[STRIDE_TO:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr<!rec_HasDtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[STRIDE_TO]], %[[STRIDE_FROM]]) nothrow : (!cir.ptr<!rec_HasDtor>, !cir.ptr<!rec_HasDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
-//
// CHECK-NEXT: } destroy {
-// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}):
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i
-// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[ELEM:.*]] = cir.ptr_stride(%[[ARRPTR]] : !cir.ptr<!rec_HasDtor>, %[[LAST_IDX]] : !u64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>, ["__array_idx"]
-// CHECK-NEXT: cir.store %[[ELEM]], %[[ITR]] : !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!cir.ptr<!rec_HasDtor>>, !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ELEM_LOAD]]) nothrow : (!cir.ptr<!rec_HasDtor>) -> ()
-// CHECK-NEXT: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i
-// CHECK-NEXT: %[[PREVELEM:.*]] = cir.ptr_stride(%[[ELEM_LOAD]] : !cir.ptr<!rec_HasDtor>, %[[NEG_ONE]] : !s64i), !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: cir.store %[[PREVELEM]], %[[ITR]] : !cir.ptr<!rec_HasDtor>, !cir.ptr<!cir.ptr<!rec_HasDtor>>
+// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasDtor x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[LAST_SUB_ONE:.*]] = cir.binop(sub, %[[UB_CAST]], %[[ONE]]) : !u64i
+// CHECK-NEXT: cir.store %[[LAST_SUB_ONE]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR_LOAD]], %[[LB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>> -> !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!rec_HasDtor>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!rec_HasDtor>
+// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_HasDtor>) -> ()
// CHECK-NEXT: cir.yield
-// CHECK-NEXT: } while {
-// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!cir.ptr<!rec_HasDtor>>, !cir.ptr<!rec_HasDtor>
-// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[ELEM_LOAD]], %[[ARRPTR]]) : !cir.ptr<!rec_HasDtor>, !cir.bool
-// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: }
// CHECK-NEXT: acc.yield
// CHECK-NEXT: }
+//
extern "C" void acc_compute() {
// CHECK: cir.func{{.*}} @acc_compute() {
@@ -461,7 +416,7 @@ extern "C" void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1]"}
- // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>)
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__Bcnt1__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc parallel firstprivate(someFloatArr[1])
@@ -473,7 +428,7 @@ extern "C" void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1]"}
- // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>)
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__Bcnt1__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc serial firstprivate(noCopyArr[1])
@@ -485,7 +440,7 @@ extern "C" void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1]"}
- // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc parallel firstprivate(hasCopyArr[1])
@@ -497,7 +452,7 @@ extern "C" void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[HASCOPYARR]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {name = "hasCopyArr[1]"}
- // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>)
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc parallel firstprivate(notDefCtorArr[1])
@@ -509,7 +464,7 @@ extern "C" void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOTDEFCTORARR]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {name = "notDefCtorArr[1]"}
- // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>)
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc parallel firstprivate(dtorArr[1])
@@ -521,7 +476,7 @@ extern "C" void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[DTORARR]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_HasDtor x 5>> {name = "dtorArr[1]"}
- // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc serial firstprivate(someIntArr[1], someFloatArr[1], noCopyArr[1], hasCopyArr[1], notDefCtorArr[1], dtorArr[1])
@@ -568,12 +523,12 @@ extern "C" void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE6:.*]] = acc.firstprivate varPtr(%[[DTORARR]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_HasDtor x 5>> {name = "dtorArr[1]"}
- // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__Bcnt1__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
@@ -587,7 +542,7 @@ extern "C" void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[INTARR]] : !cir.ptr<!cir.array<!s32i x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!s32i x 5>> {name = "someIntArr[1:1]"}
- // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>)
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__Bcnt1__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr<!cir.array<!s32i x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc serial firstprivate(someFloatArr[1:1])
@@ -600,7 +555,7 @@ extern "C" void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[FLOATARR]] : !cir.ptr<!cir.array<!cir.float x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!cir.float x 5>> {name = "someFloatArr[1:1]"}
- // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>)
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__Bcnt1__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.float x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc parallel firstprivate(noCopyArr[1:1])
@@ -613,7 +568,7 @@ extern "C" void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOCOPYARR]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>> {name = "noCopyArr[1:1]"}
- // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc serial firstprivate(hasCopyArr[1:1])
@@ -626,7 +581,7 @@ extern "C" void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[HASCOPYARR]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_CopyConstruct x 5>> {name = "hasCopyArr[1:1]"}
- // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>)
+ // CHECK-NEXT: acc.serial firstprivate(@firstprivatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc parallel firstprivate(notDefCtorArr[1:1])
@@ -639,7 +594,7 @@ extern "C" void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[NOTDEFCTORARR]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>> {name = "notDefCtorArr[1:1]"}
- // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>)
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc parallel firstprivate(dtorArr[1:1])
@@ -652,7 +607,7 @@ extern "C" void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE:.*]] = acc.firstprivate varPtr(%[[DTORARR]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_HasDtor x 5>> {name = "dtorArr[1:1]"}
- // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
#pragma acc parallel firstprivate(someIntArr[1:1], someFloatArr[1:1], noCopyArr[1:1], hasCopyArr[1:1], notDefCtorArr[1:1], dtorArr[1:1])
@@ -705,12 +660,12 @@ extern "C" void acc_compute() {
// CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1
// CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64)
// CHECK-NEXT: %[[PRIVATE6:.*]] = acc.firstprivate varPtr(%[[DTORARR]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>) bounds(%[[BOUNDS]]) -> !cir.ptr<!cir.array<!rec_HasDtor x 5>> {name = "dtorArr[1:1]"}
- // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>,
- // CHECK-SAME: @firstprivatization__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
+ // CHECK-NEXT: acc.parallel firstprivate(@firstprivatization__Bcnt1__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr<!cir.array<!s32i x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr<!cir.array<!cir.float x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr<!cir.array<!rec_NoCopyConstruct x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr<!cir.array<!rec_CopyConstruct x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr<!cir.array<!rec_NonDefaultCtor x 5>>,
+ // CHECK-SAME: @firstprivatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr<!cir.array<!rec_HasDtor x 5>>)
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } loc
}
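For readers skimming the diff: the recipe changes above replace fully unrolled per-element copies (one cir.const index per element) with a single bounds-driven loop. A minimal C++ sketch of the pattern being checked, not part of the patch; the names lb, ub, and T are illustrative stand-ins for the acc.get_lowerbound/acc.get_upperbound values and the element type:

#include <cstdint>
#include <new>

// Forward bounds-driven copy, one element per iteration, mirroring the
// cir.for cond/body/step structure in the "copy" regions above.
template <typename T>
void firstprivate_copy(const T *from, T *to, uint64_t lb, uint64_t ub) {
  for (uint64_t i = lb; i < ub; ++i)
    new (&to[i]) T(from[i]); // copy-ctor call; a plain load/store for trivial T
}

// The HasDtor "destroy" region walks the same range in reverse, from
// upperbound - 1 down to lowerbound (cir.cmp(ge, ...) with a dec step).
template <typename T>
void firstprivate_destroy(T *priv, uint64_t lb, uint64_t ub) {
  for (uint64_t i = ub; i-- > lb;) // decrement-first form avoids unsigned wrap
    priv[i].~T();
}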
diff --git a/clang/test/CIR/CodeGenOpenACC/firstprivate-clause-recipes.cpp b/clang/test/CIR/CodeGenOpenACC/firstprivate-clause-recipes.cpp
new file mode 100644
index 0000000..e10d737
--- /dev/null
+++ b/clang/test/CIR/CodeGenOpenACC/firstprivate-clause-recipes.cpp
@@ -0,0 +1,691 @@
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+
+// Note: unlike the 'private' recipe checks, this is just for spot-checking,
+// so this test isn't as comprehensive. The same code paths are used for
+// 'private', so we just count on those to catch the errors.
+struct NoOps {
+ int i;
+ ~NoOps();
+};
+
+struct CtorDtor {
+ int i;
+ CtorDtor();
+ ~CtorDtor();
+};
+
+void do_things(unsigned A, unsigned B) {
+ NoOps ThreeArr[5][5][5];
+
+#pragma acc parallel firstprivate(ThreeArr[B][B][B])
+// CHECK:acc.firstprivate.recipe @firstprivatization__Bcnt3__ZTSA5_A5_A5_5NoOps : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}):
+// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>, !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>>, ["openacc.firstprivate.init"] {alignment = 4 : i64}
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}):
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i
+// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
+// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> -> !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>
+// CHECK-NEXT: %[[BOUND3_STRIDE_FROM:.*]] = cir.ptr_stride(%[[TLA_DECAY_FROM]] : !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>
+// CHECK-NEXT: %[[TLA_DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> -> !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>
+// CHECK-NEXT: %[[BOUND3_STRIDE_TO:.*]] = cir.ptr_stride(%[[TLA_DECAY_TO]] : !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE_FROM]] : !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>> -> !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: %[[BOUND2_STRIDE_FROM:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY_FROM]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE_TO]] : !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>> -> !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: %[[BOUND2_STRIDE_TO:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY_TO]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE_FROM]] : !cir.ptr<!cir.array<!rec_NoOps x 5>> -> !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[BOUND1_STRIDE_FROM:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY_FROM]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE_TO]] : !cir.ptr<!cir.array<!rec_NoOps x 5>> -> !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[BOUND1_STRIDE_TO:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY_TO]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1ERKS_(%[[BOUND1_STRIDE_TO]], %[[BOUND1_STRIDE_FROM]]) nothrow : (!cir.ptr<!rec_NoOps>, !cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT:} destroy {
+// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}):
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i
+// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
+// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[LAST_SUB_ONE:.*]] = cir.binop(sub, %[[UB3_CAST]], %[[ONE]]) : !u64i
+// CHECK-NEXT: cir.store %[[LAST_SUB_ONE]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR3_LOAD]], %[[LB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[PRIVATE]] : !cir.ptr<!cir.array<!cir.array<!cir.array<!rec_NoOps x 5> x 5> x 5>> -> !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>
+// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[LAST_SUB_ONE:.*]] = cir.binop(sub, %[[UB2_CAST]], %[[ONE]]) : !u64i
+// CHECK-NEXT: cir.store %[[LAST_SUB_ONE]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR2_LOAD]], %[[LB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE]] : !cir.ptr<!cir.array<!cir.array<!rec_NoOps x 5> x 5>> -> !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY]] : !cir.ptr<!cir.array<!rec_NoOps x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.array<!rec_NoOps x 5>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i
+// CHECK-NEXT: %[[LAST_SUB_ONE:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[ONE]]) : !u64i
+// CHECK-NEXT: cir.store %[[LAST_SUB_ONE]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr<!cir.array<!rec_NoOps x 5>> -> !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[BOUND1_STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsD1Ev(%[[BOUND1_STRIDE]]) nothrow : (!cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT:acc.yield
+// CHECK-NEXT:}
+ ;
+
+ NoOps ***ThreePtr;
+#pragma acc parallel firstprivate(ThreePtr[B][B][A:B])
+// CHECK: acc.firstprivate.recipe @firstprivatization__Bcnt3__ZTSPPP5NoOps : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}):
!cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, ["openacc.firstprivate.init"] {alignment = 8 : i64} +// CHECK-NEXT: %[[INT_PTR_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[INT_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i +// CHECK-NEXT: %[[SIZEOF_PTR_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_PTR_ALLOCA:.*]] = cir.alloca !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// +// Copy array pointer to the original alloca. 
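+//
+// (Rough C-style sketch of the whole init region, with hypothetical names;
+// u1..u3 stand for the BOUND1..BOUND3 upper bounds and nothing here is
+// literal emitted IR. Each pointer level gets one flat "openacc.init.bounds"
+// allocation, and each copy loop wires slot i of the previous level to row i
+// of the next one:
+//
+//   NoOps **l3[u3];            // one NoOps** slot per outermost element
+//   NoOps  *l2[u2 * u3];       // one NoOps* slot per middle element
+//   NoOps   l1[u1 * u2 * u3];  // contiguous storage for the objects
+//   for (unsigned i = 0; i < 1; ++i)       priv[i] = &l3[u3 * i];
+//   for (unsigned i = 0; i < u3; ++i)      l3[i]   = &l2[u2 * i];
+//   for (unsigned i = 0; i < u2 * u3; ++i) l2[i]   = &l1[u1 * i];
+//
+// The loop below is the second of those three stores.)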
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST_3:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS_2:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[NUM_ELTS]]) : !u64i +// CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS_2]], %[[SIZEOF_INT]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_ALLOCA:.*]] = cir.alloca !rec_NoOps, !cir.ptr<!rec_NoOps>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// Copy array pointer to the original alloca. 
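+//
+// (Sizing note, for illustration only: every bounded alloca above is sized
+// as element-count times element-size, where the count is the product of the
+// enclosing upper bounds. E.g. with all three bounds equal to 5, the
+// innermost NoOps buffer is (5 * 5 * 5) * 4 = 500 bytes, the 4 matching the
+// #cir.int<4> sizeof constant in the checks; at runtime the counts come from
+// the acc.get_upperbound values. The loop below is the third store of the
+// sketch above.)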
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+//
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[ITR_LOAD]]) : !u64i
+// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_ALLOCA]] : !cir.ptr<!rec_NoOps>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr<!rec_NoOps>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } copy {
+// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}):
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i
+// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i
+// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
+
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[TLA_LOAD_FROM:.*]] = cir.load %[[ARG_FROM]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: %[[BOUND3_STRIDE_FROM:.*]] = cir.ptr_stride(%[[TLA_LOAD_FROM]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: %[[TLA_LOAD_TO:.*]] = cir.load %[[ARG_TO]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: %[[BOUND3_STRIDE_TO:.*]] = cir.ptr_stride(%[[TLA_LOAD_TO]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND3_STRIDE_LOAD_FROM:.*]] = cir.load %[[BOUND3_STRIDE_FROM]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[BOUND2_STRIDE_FROM:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_LOAD_FROM]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[BOUND3_STRIDE_LOAD_TO:.*]] = cir.load %[[BOUND3_STRIDE_TO]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: %[[BOUND2_STRIDE_TO:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_LOAD_TO]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD_FROM:.*]] = cir.load %[[BOUND2_STRIDE_FROM]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[BOUND1_STRIDE_FROM:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD_FROM]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD_TO:.*]] = cir.load %[[BOUND2_STRIDE_TO]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: %[[BOUND1_STRIDE_TO:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD_TO]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps>
+// CHECK-NEXT: cir.call @_ZN5NoOpsC1ERKS_(%[[BOUND1_STRIDE_TO]], %[[BOUND1_STRIDE_FROM]]) nothrow : (!cir.ptr<!rec_NoOps>, !cir.ptr<!rec_NoOps>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB3:.*]] = cir.binop(sub, %[[UB3_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB3]], %[[ITR3]] : !u64i, !cir.ptr<!u64i> + +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR3_LOAD]], %[[LB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[PRIVATE]] : !cir.ptr<!cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>>, !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB2:.*]] = cir.binop(sub, %[[UB2_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB2]], %[[ITR2]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR2_LOAD]], %[[LB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[BOUND3_STRIDE_LOAD:.*]] 
= cir.load %[[BOUND3_STRIDE]] : !cir.ptr<!cir.ptr<!cir.ptr<!rec_NoOps>>>, !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_LOAD]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB1:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB1]], %[[ITR1]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps> +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr<!rec_NoOps>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_NoOps> +// CHECK-NEXT: cir.call @_ZN5NoOpsD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr<!rec_NoOps>) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } +; + using PtrTArrayTy = CtorDtor*[5]; + PtrTArrayTy *PtrArrayPtr; + +#pragma acc parallel firstprivate(PtrArrayPtr[B][B][B]) +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__Bcnt3__ZTSPA5_P8CtorDtor : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>>, ["openacc.firstprivate.init"] {alignment = 8 : i64} +// CHECK-NEXT: %[[UB3:.*]] = 
acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<40> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array<!cir.ptr<!rec_CtorDtor> x 5>, !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i +// +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARR_ALLOCA]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> -> !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ZERO]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i +// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr<!rec_CtorDtor>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["itr"] {alignment 
= 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr<!rec_CtorDtor>, %[[SRC_IDX]] : !u64i), !cir.ptr<!rec_CtorDtor> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr<!rec_CtorDtor>, !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } copy { +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[TLA_LOAD_FROM:.*]] = cir.load %[[ARG_FROM]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>>, !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> +// CHECK-NEXT: %[[BOUND3_STRIDE_FROM:.*]] = cir.ptr_stride(%[[TLA_LOAD_FROM]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> +// CHECK-NEXT: %[[TLA_LOAD_TO:.*]] = cir.load %[[ARG_TO]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>>, !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> +// CHECK-NEXT: %[[BOUND3_STRIDE_TO:.*]] = cir.ptr_stride(%[[TLA_LOAD_TO]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = 
builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i
+// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i
+// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE_FROM]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> -> !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[BOUND2_STRIDE_FROM:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY_FROM]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY_TO:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE_TO]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> -> !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: %[[BOUND2_STRIDE_TO:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY_TO]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>>
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i
+// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i
+// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD_FROM:.*]] = cir.load %[[BOUND2_STRIDE_FROM]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[BOUND1_STRIDE_FROM:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD_FROM]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD_TO:.*]] = cir.load %[[BOUND2_STRIDE_TO]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: %[[BOUND1_STRIDE_TO:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD_TO]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor>
+// CHECK-NEXT: cir.call @_ZN8CtorDtorC1ERKS_(%[[BOUND1_STRIDE_TO]], %[[BOUND1_STRIDE_FROM]]) nothrow : (!cir.ptr<!rec_CtorDtor>, !cir.ptr<!rec_CtorDtor>) -> ()
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+//
CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB3:.*]] = cir.binop(sub, %[[UB3_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB3]], %[[ITR3]] : !u64i, !cir.ptr<!u64i> +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR3_LOAD]], %[[LB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[PRIVATE]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>>, !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB2:.*]] = cir.binop(sub, %[[UB2_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB2]], %[[ITR2]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR2_LOAD]], %[[LB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: 
%[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE]] : !cir.ptr<!cir.array<!cir.ptr<!rec_CtorDtor> x 5>> -> !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr<!cir.ptr<!rec_CtorDtor>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB1:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB1]], %[[ITR1]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!rec_CtorDtor>>, !cir.ptr<!rec_CtorDtor> +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr<!rec_CtorDtor>, %[[ITR1_LOAD]] : !u64i), !cir.ptr<!rec_CtorDtor> +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) : (!cir.ptr<!rec_CtorDtor>) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +} diff --git a/clang/test/ClangScanDeps/modules-context-hash-from-named-module.cpp b/clang/test/ClangScanDeps/modules-context-hash-from-named-module.cpp new file mode 100644 index 0000000..c272022 --- /dev/null +++ b/clang/test/ClangScanDeps/modules-context-hash-from-named-module.cpp @@ -0,0 +1,121 @@ +// Checks that driver-generated options for C++ module inputs preserve the +// canonical module build commands compared to an equivalent non-module input, +// and that they do not produce additional internal scanning PCMs. 
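+//
+// Layout, for orientation: main.cpp imports the named modules A and B, and
+// all three TUs include root.h, which module.modulemap covers with the
+// Clang module "root". A correct scan therefore reports one context hash
+// for "root" that is shared by every translation unit below.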
+ +// RUN: rm -rf %t +// RUN: split-file %s %t + +//--- main.cpp +#include "root.h" +import A; +import B; + +auto main() -> int { return 1; } + +//--- A.cppm +module; +#include "root.h" +export module A; + +//--- B.cppm +module; +#include "root.h" +export module B; + +//--- module.modulemap +module root { header "root.h" } + +//--- root.h +// empty + +// RUN: %clang -std=c++23 -fmodules \ +// RUN: -fmodules-cache-path=%t/modules-cache \ +// RUN: %t/main.cpp %t/A.cppm %t/B.cppm \ +// RUN: -fsyntax-only -fdriver-only -MJ %t/deps.json +// +// RUN: sed -e '1s/^/[/' -e '$s/,$/]/' -e 's:\\\\\?:/:g' %t/deps.json \ +// RUN: > %t/compile_commands.json +// +// RUN: clang-scan-deps \ +// RUN: -compilation-database=%t/compile_commands.json \ +// RUN: -format experimental-full \ +// RUN: | sed 's:\\\\\?:/:g' | FileCheck %s -DPREFIX=%/t + +// CHECK: { +// CHECK-NEXT: "modules": [ +// CHECK-NEXT: { +// CHECK-NEXT: "clang-module-deps": [], +// CHECK-NEXT: "clang-modulemap-file": "[[PREFIX]]/module.modulemap", +// CHECK: "context-hash": "[[HASH_ROOT:.*]]", +// CHECK-NEXT: "file-deps": [ +// CHECK-NEXT: "[[PREFIX]]/module.modulemap", +// CHECK-NEXT: "[[PREFIX]]/root.h" +// CHECK-NEXT: ], +// CHECK-NEXT: "link-libraries": [], +// CHECK-NEXT: "name": "root" +// CHECK-NEXT: } +// CHECK-NEXT: ], +// CHECK-NEXT: "translation-units": [ +// CHECK-NEXT: { +// CHECK-NEXT: "commands": [ +// CHECK-NEXT: { +// CHECK-NEXT: "clang-context-hash": "{{.*}}", +// CHECK-NEXT: "named-module-deps": [ +// CHECK-NEXT: "A", +// CHECK-NEXT: "B" +// CHECK-NEXT: ], +// CHECK-NEXT: "clang-module-deps": [ +// CHECK-NEXT: { +// CHECK-NEXT: "context-hash": "[[HASH_ROOT]]", +// CHECK-NEXT: "module-name": "root" +// CHECK-NEXT: } +// CHECK-NEXT: ], +// CHECK: "file-deps": [ +// CHECK-NEXT: "[[PREFIX]]/main.cpp" +// CHECK-NEXT: ], +// CHECK-NEXT: "input-file": "[[PREFIX]]/main.cpp" +// CHECK-NEXT: } +// CHECK-NEXT: ] +// CHECK-NEXT: }, +// CHECK-NEXT: { +// CHECK-NEXT: "commands": [ +// CHECK-NEXT: { +// CHECK-NEXT: "clang-context-hash": "{{.*}}", +// CHECK-NEXT: "named-module": "A", +// CHECK-NEXT: "clang-module-deps": [ +// CHECK-NEXT: { +// CHECK-NEXT: "context-hash": "[[HASH_ROOT]]", +// CHECK-NEXT: "module-name": "root" +// CHECK-NEXT: } +// CHECK-NEXT: ], +// CHECK: "file-deps": [ +// CHECK-NEXT: "[[PREFIX]]/A.cppm" +// CHECK-NEXT: ], +// CHECK-NEXT: "input-file": "[[PREFIX]]/A.cppm" +// CHECK-NEXT: } +// CHECK-NEXT: ] +// CHECK-NEXT: }, +// CHECK-NEXT: { +// CHECK-NEXT: "commands": [ +// CHECK-NEXT: { +// CHECK-NEXT: "clang-context-hash": "{{.*}}", +// CHECK-NEXT: "named-module": "B", +// CHECK-NEXT: "clang-module-deps": [ +// CHECK-NEXT: { +// CHECK-NEXT: "context-hash": "[[HASH_ROOT]]", +// CHECK-NEXT: "module-name": "root" +// CHECK-NEXT: } +// CHECK-NEXT: ], +// CHECK: "file-deps": [ +// CHECK-NEXT: "[[PREFIX]]/B.cppm" +// CHECK-NEXT: ], +// CHECK-NEXT: "input-file": "[[PREFIX]]/B.cppm" +// CHECK-NEXT: } +// CHECK-NEXT: ] +// CHECK-NEXT: } +// CHECK-NEXT: ] +// CHECK-NEXT: } + +// This tests that the scanner doesn't produce multiple internal scanning PCMs +// for our single Clang module (root). 
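+//
+// A single PCM means the three scans agreed on one context hash for "root";
+// if any module TU had been scanned with a non-canonical command line,
+// "root" would hash differently there and a second PCM would appear.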
+// RUN: find %t/modules-cache -name "*.pcm" | wc -l | grep 1
diff --git a/clang/test/CodeGen/X86/avx512f-builtins.c b/clang/test/CodeGen/X86/avx512f-builtins.c
index 47cb485a..7756f0d 100644
--- a/clang/test/CodeGen/X86/avx512f-builtins.c
+++ b/clang/test/CodeGen/X86/avx512f-builtins.c
@@ -6273,6 +6273,78 @@ __m512i test_mm512_ternarylogic_epi32(__m512i __A, __m512i __B, __m512i __C) {
   // CHECK: @llvm.x86.avx512.pternlog.d.512({{.*}}, i32 240)
   return _mm512_ternarylogic_epi32(__A, __B, __C, _MM_TERNLOG_A);
 }
+TEST_CONSTEXPR(match_v16si(
+    _mm512_ternarylogic_epi32(
+        ((__m512i)((__v16si){
+            0x6AA79987, (int)0xBB91433A, 0x029A7245, (int)0xD1F6F86C,
+            (int)0xD340BBCD, (int)0xCD8778E7, 0x4C73A942, (int)0xDAEA58BA,
+            0x5E503A67, (int)0xEE897110, 0x3193CA54, 0x452EC40A,
+            (int)0x90E5E945, 0x6FACAA50, 0x29645F8B, 0x5F811CB9
+        })),
+        ((__m512i)((__v16si){
+            0x1FCFF454, (int)0xDFC9E3B1, 0x6ED4E94B, 0x42D6CB5C,
+            (int)0x8FE46024, (int)0xA091250E, 0x2CA1C789, (int)0x9C9CEA0C,
+            (int)0x8D9FE5B9, 0x2FD2B7A4, 0x5ADAD121, (int)0xBCF74D7A,
+            (int)0xF543BBCF, (int)0xBB9D58E4, 0x175F0CD2, (int)0x87F26AEE
+        })),
+        ((__m512i)((__v16si){
+            (int)0xFA882692, (int)0xBC428D42, 0x6980A81F, (int)0x95C5FB98,
+            (int)0x8101E89A, 0x2AA4857E, 0x25ECE845, 0x34A9AF41,
+            (int)0xB80E3B0D, 0x13ED748B, 0x30A1F6D5, (int)0xD64A3CE0,
+            0x57708107, 0x527122DC, 0x06057C82, 0x7576714A
+        })),
+        (unsigned char)0x11), // ~B & ~C
+    0x00300929, 0x0034100C, (int)0x902B16A0, 0x28280423,
+    0x701A1741, 0x554A5A81, (int)0xD2121032, 0x434210B2,
+    0x42600042, (int)0xC0000850, (int)0x8504080A, 0x01008205,
+    0x088C4430, 0x04028503, (int)0xE8A0832D, 0x08098411));
+TEST_CONSTEXPR(match_v16si(
+    _mm512_ternarylogic_epi32(
+        ((__m512i)((__v16si){
+            (int)0xA3B1799D, (int)0x46685257, (int)0x392456DE, (int)0xBC8960A9,
+            (int)0x6C031199, (int)0x07A0CA6E, (int)0x37F8A88B, (int)0x8B8148F6,
+            (int)0x386ECBE0, (int)0x96DA1DAC, (int)0xCE4A2BBD, (int)0xB2B9437A,
+            (int)0x571AA876, (int)0x27CD8130, (int)0x562B0F79, (int)0x17BE3111
+        })),
+        ((__m512i)((__v16si){
+            (int)0x18C26797, (int)0xD8F56413, (int)0x9A8DCA03, (int)0xCE9FF57F,
+            (int)0xBACFB3D0, (int)0x89463E85, (int)0x60E7A113, (int)0x8D5288F1,
+            (int)0xDC98D2C1, (int)0x93CD59BF, (int)0xB45ED1F0, (int)0x19DB3AD0,
+            (int)0x47294739, (int)0x5D65A441, (int)0x5EC42E08, (int)0xA5E5A5AB
+        })),
+        ((__m512i)((__v16si){
+            (int)0xBAA80DD4, (int)0x29D4BEEF, (int)0x6123FDF7, (int)0x8E944239,
+            (int)0xAF42E12F, (int)0xC6A7EE39, (int)0x50C187FC, (int)0x448AAA9E,
+            (int)0x508EBAD7, (int)0xA7CAD415, (int)0x757750A9, (int)0x43CF2FDE,
+            (int)0x95A76D79, (int)0x663F1C97, (int)0xFF5E9FF0, (int)0x827050A8
+        })),
+        (unsigned char)0x38), // (A & ~B) | (~A & B & C)
+    (int)0xBB311C08, (int)0x0E9C3644, (int)0x21219CDD, (int)0x32140090,
+    (int)0xC640A009, (int)0x86A6E46B, (int)0x57190998, (int)0x0683C006,
+    (int)0x60E61921, (int)0x05124411, (int)0x7A147A0D, (int)0xA36269AA,
+    (int)0x1033ED4F, (int)0x62A80531, (int)0x086F0171, (int)0x925A10B8));
+TEST_CONSTEXPR(match_v16si(
+    _mm512_ternarylogic_epi32(
+        ((__m512i)((__v16si){
+            (int)0x3193CA54, (int)0x90E5E945, (int)0x29645F8B, (int)0x6ED4E94B,
+            (int)0x8D9FE5B9, (int)0x8101E89A, (int)0x25ECE845, (int)0xB80E3B0D,
+            (int)0x57708107, (int)0x06057C82, (int)0x56EAA301, (int)0xBE99854A,
+            (int)0x00E266D0, (int)0xDEEA959E, (int)0x2DCAABD5, (int)0x6A1ECCDA})),
+        ((__m512i)((__v16si){
+            (int)0x93FD7234, (int)0xBC90A6EC, (int)0xD3285151, (int)0xCE9FB6A8,
+            (int)0x3B788B66, (int)0xDF8960AD, (int)0x2F927291, (int)0x96AF0DEA,
+            (int)0xF56AE7EA, (int)0x2A04F77A, (int)0xD50B612B, (int)0x3AA725CB,
+            (int)0x8A04F74F, (int)0x282FE557, (int)0x52E1FBB0, (int)0x0CA02F4D})),
+        ((__m512i)((__v16si){
+            (int)0xB6307BAD, (int)0x141CB03E, (int)0xEBAA7701, (int)0xC9F0B072,
+            (int)0x5E2503DD, (int)0xC2E1DAC4, (int)0x0FC01B11, (int)0xA0485922,
+            (int)0x339BB47E, (int)0xB2D4F32A, (int)0x8E7AE9AF, (int)0x147DE9B0,
+            (int)0xF79FCAA0, (int)0x3B0B6398, (int)0x29DDF4C7, (int)0x49CDBEC7})),
+        (unsigned char)0xC3), // ~(A ^ B)
+    (int)0x5D91479F, (int)0xD38AB056, (int)0x05B3F125, (int)0x5FB4A01C,
+    (int)0x49189120, (int)0xA17777C8, (int)0xF581652B, (int)0xD15EC918,
+    (int)0x5DE59912, (int)0xD3FE7407, (int)0x7C1E3DD5, (int)0x7BC15F7E,
+    (int)0x75196E60, (int)0x093A8F36, (int)0x80D4AF9A, (int)0x99411C68));
 __m512i test_mm512_mask_ternarylogic_epi32(__m512i __A, __mmask16 __U, __m512i __B, __m512i __C) {
   // CHECK-LABEL: test_mm512_mask_ternarylogic_epi32
@@ -6280,6 +6352,63 @@ __m512i test_mm512_mask_ternarylogic_epi32(__m512i __A, __mmask16 __U, __m512i _
   // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
   return _mm512_mask_ternarylogic_epi32(__A, __U, __B, __C, _MM_TERNLOG_B);
 }
+TEST_CONSTEXPR(match_v16si(
+    _mm512_mask_ternarylogic_epi32(
+        _mm512_setr_epi32(
+            (int)0xFFFFFFFF, 0x00000000, (int)0xDEADBEEF, (int)0xCAFEBABE, 0x12345678, (int)0x87654321,
+            (int)0xAAAAAAAA, 0x55555555, (int)0xF00DBEEF, (int)0xBAD2FEAF, 0x0112358D, (int)0xDEADF00D,
+            (int)0x8BADF00D, (int)0xBADDCAFE, (int)0xBAADF00D, (int)0xBAAAAAAD),
+        (__mmask16)0x9D71,
+        _mm512_setr_epi32(
+            0x11111111, 0x22222222, 0x33333333, 0x44444444, (int)0xABCDEF01, (int)0xFEDCBA98,
+            (int)0xCCCCCCCC, 0x33333333, 0x1337BEEF, 0x01010101, (int)0x81321345, (int)0xBAADF00D,
+            0x1BADB002, 0x5EE7C0DE, 0x12345678, 0x55555555),
+        _mm512_setr_epi32(
+            (int)0xF0F0F0F0, 0x0F0F0F0F, 0x1234ABCD, (int)0x9876FEDC, 0x00FF00FF, (int)0xFF00FF00,
+            (int)0xFF0000FF, 0x00FFFF00, 0x50D4CAFE, (int)0x8BADF00D, (int)0xABCDEFFF, (int)0xFEEDF00D,
+            (int)0xBEEFCAFE, (int)0xDEADC0DE, (int)0x1BADBEEF, 0x33333333),
+        (unsigned char)0xB1), // op: (~B & (A | ~C)) | (B & A & C)
+    (int)0xFEFEFEFE, 0x00000000, (int)0xDEADBEEF, (int)0xCAFEBABE, 0x54341078, (int)0x87234367,
+    (int)0xAA3333AA, 0x55555555, (int)0xFC0C8BEE, (int)0xBAD2FEAF, 0x5500258D, (int)0xDFBFFFFF,
+    (int)0xCABDC50D, (int)0xBADDCAFE, (int)0xBAADF00D, (int)0xBAAAAAA9));
+TEST_CONSTEXPR(match_v16si(
+    _mm512_mask_ternarylogic_epi32(
+        _mm512_setr_epi32(
+            0x0000FFFF, (int)0xFFFF0000, 0x01010101, (int)0xFF00FF00, (int)0xAAAAAAAA, 0x33333333,
+            (int)0xF0F0F0F0, 0x0F0F0F0F, 0x12345678, (int)0x87654321, 0x7FFFFFFF, (int)0xDEADBEEF,
+            (int)0xCAFEBABE, 0x01234567, (int)0xABCDEF01, (int)0xFEDCBA98),
+        (__mmask16)0x3C3C,
+        _mm512_setr_epi32(
+            0x1111EEEE, 0x2222DDDD, (int)0x80808080, 0x00FF00FF, 0x55555555, 0x00000000,
+            (int)0xCCCCCCCC, 0x33333333, 0x11111111, 0x22222222, (int)0x80000000, 0x12345678,
+            0x11223344, (int)0xFEDCBA98, (int)0xBAD0BAD0, (int)0xBEEFCAFE),
+        _mm512_setr_epi32(
+            0x12345678, (int)0x87654321, 0x7F7F7F7F, (int)0xFEDCBA98, (int)0xCCCCCCCC, (int)0xFFFFFFFF,
+            0x11111111, 0x22222222, (int)0xABABABAB, (int)0xCDCDCDCD, 0x00000001, (int)0xFACEB00C,
+            0x55667788, (int)0xABCDEF01, 0x12345678, (int)0xDEADBEEF),
+        (unsigned char)0xE8), // op: (A & B) | (B & C) | (C & A) (Majority)
+    0x0000FFFF, (int)0xFFFF0000, 0x01010101, (int)0xFEDCBA98, (int)0xCCCCCCCC, 0x33333333,
+    (int)0xF0F0F0F0, 0x0F0F0F0F, 0x12345678, (int)0x87654321, 0x00000001, (int)0xDAACB66C,
+    0x5166338C, (int)0xABCDEF01, (int)0xABCDEF01, (int)0xFEDCBA98));
+TEST_CONSTEXPR(match_v16si(
+    _mm512_mask_ternarylogic_epi32(
+        _mm512_setr_epi32(
+            (int)0xDEADBEEF, 0x01234567, (int)0xAAAAAAAA, 0x0F0F0F0F, (int)0xBAADF00D, 0x00000001,
+            (int)0x80000000, 0x7FFFFFFF, (int)0xCAFEBABE, 0x13579BDF, (int)0xABCDEF01, (int)0xCAFEBABE,
+            (int)0xDEADBEEF, (int)0xFF00FF00, (int)0xBEEFCAFE, 0x00000001),
+        (__mmask16)0xBEEF,
+        _mm512_setr_epi32(
+            (int)0xFACEB00C, (int)0x89ABCDEF, 0x55555555, (int)0xF0F0F0F0, 0x1337C0DE, 0x00000002,
+            0x40000000, (int)0xBFFFFFFF, 0x00000000, 0x2468ACE0, 0x10FEDCBA, 0x00000000,
+            (int)0xFEEDFACE, 0x00FF00FF, 0x12345678, 0x00000002),
+        _mm512_setr_epi32(
+            0x12345678, (int)0xFFFFFFFF, (int)0xCCCCCCCC, (int)0x88888888, (int)0xDEADC0DE, 0x00000004,
+            0x20000000, (int)0xDFFFFFFF, (int)0xFFFFFFFF, (int)0xFEDCBA98, 0x55555555, (int)0xFFFFFFFF,
+            (int)0x8BADF00D, (int)0xF0F0F0F0, (int)0xFACEB00C, 0x00000003),
+        (unsigned char)0x96), // op: A ^ B ^ C (XOR3)
+    (int)0x3657589B, 0x77777777, 0x33333333, 0x77777777, (int)0xBAADF00D, 0x00000007,
+    (int)0xE0000000, 0x1FFFFFFF, (int)0xCAFEBABE, (int)0xC9E38DA7, (int)0xEE6666EE, 0x35014541,
+    (int)0xABEDB42C, 0x0F0F0F0F, (int)0xBEEFCAFE, 0x00000000));
 __m512i test_mm512_maskz_ternarylogic_epi32(__mmask16 __U, __m512i __A, __m512i __B, __m512i __C) {
   // CHECK-LABEL: test_mm512_maskz_ternarylogic_epi32
@@ -6287,12 +6416,106 @@ __m512i test_mm512_maskz_ternarylogic_epi32(__mmask16 __U, __m512i __A, __m512i
   // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> zeroinitializer
   return _mm512_maskz_ternarylogic_epi32(__U, __A, __B, __C, _MM_TERNLOG_C);
 }
+TEST_CONSTEXPR(match_v16si(
+    _mm512_maskz_ternarylogic_epi32(
+        (__mmask16)0x6498,
+        ((__m512i)((__v16si){
+            1393174638, 1243877629, -826208314, 1770837977,
+            -1678093555, -414088391, 1288769935, 703296098,
+            1428104678, 405688910, -167788555, 1965219804,
+            -1959018749, 514303227, 754191429, 579811517})),
+        ((__m512i)((__v16si){
+            -1301280384, -923736510, -797648805, 475853364,
+            1247377062, 213070102, 626020209, 2037794518,
+            122183669, 1712787569, -1042441569, -1416844145,
+            1374304252, -1323427639, 1432483217, 1621706359})),
+        ((__m512i)((__v16si){
+            234227517, -313293475, 1851213039, -300885844,
+            -1479339544, 575183087, -655840260, -1853668117,
+            433622095, 933629633, -1324904005, -68434060,
+            486070655, 226865941, -1461464269, 1471789621})),
+        (unsigned char)0xAB), // (~A & ~B) | C
+    0, 0, 0, -298592082,
+    -1479042568, 0, 0, -1752969749,
+    0, 0, -1157115461, 0,
+    0, 1304818453, -1427385541, 0));
+TEST_CONSTEXPR(match_v16si(
+    _mm512_maskz_ternarylogic_epi32(
+        (__mmask16)0xA593,
+        ((__m512i)((__v16si){
+            1789368711, -1148107974, 43676229, -772343700,
+            -750732339, -846759705, 1282648386, -622176070,
+            1582316135, -292982512, 831769172, 1160692746,
+            -1863980731, 1873586768, 694443915, 1602297017})),
+        ((__m512i)((__v16si){
+            533722196, -540417103, 1859447115, 1121373020,
+            -1880858588, -1601100530, 748799881, -1667438068,
+            -1918900807, 802338724, 1524289825, -1124643462,
+            -180110385, -1147315996, 392105170, -2014156050})),
+        ((__m512i)((__v16si){
+            -91740526, -1136489150, 1770039327, -1782187112,
+            -2130581350, 715425150, 636282949, 883535681,
+            -1207026931, 334328971, 815920853, -699777824,
+            1466990855, 1383146204, 101022850, 1970696522})),
+        (unsigned char)0x21), // (~B) & ~(A ^ C)
+    1611661482, 539234310, 0, 0,
+    538610824, 0, 0, 18874368,
+    270539268, 0, -1543175586, 0,
+    0, 1075980051, 0, 1342738432));
+TEST_CONSTEXPR(match_v16si(
+    _mm512_maskz_ternarylogic_epi32(
+        (__mmask16)0xC3A5,
+        ((__m512i)((__v16si){
+            0x00000000, -0x1, (int)0x80000000, 0x7FFFFFFF,
+            (int)0xAAAAAAAA, 
0x55555555, 0x00000001, (int)0xFFFFFFFE, + 0x0000FFFF, (int)0xFFFF0000, (int)0xDEADBEEF, (int)0xCAFEBABE, + 0x01234567, (int)0x89ABCDEF, 0x13579BDF, 0x2468ACE0})), + ((__m512i)((__v16si){ + 0x2468ACE0, 0x13579BDF, (int)0x89ABCDEF, 0x01234567, + (int)0xCAFEBABE, (int)0xDEADBEEF, (int)0xFFFF0000, 0x0000FFFF, + (int)0xFFFFFFFE, 0x00000001, 0x55555555, (int)0xAAAAAAAA, + 0x7FFFFFFF, (int)0x80000000, -0x1, 0x00000000})), + ((__m512i)((__v16si){ + -0x1, 0x00000000, -0x1, 0x00000000, + -0x1, 0x00000000, -0x1, 0x00000000, + -0x1, 0x00000000, -0x1, 0x00000000, + -0x1, 0x00000000, -0x1, 0x00000000})), + (unsigned char)0xC9), // F = (A & B) | (~A & ~(B ^ C)) + 0x2468ACE0, 0x0, (int)0x89ABCDEF, 0x0, + 0x0, 0x74071445, 0x0, 0x0000FFFE, + (int)0xFFFFFFFE, 0x0000FFFE, 0x0, 0x0, + 0x0, 0x0, (int)0xFFFFFFFF, (int)0xDB97531F)); __m512i test_mm512_ternarylogic_epi64(__m512i __A, __m512i __B, __m512i __C) { // CHECK-LABEL: test_mm512_ternarylogic_epi64 // CHECK: @llvm.x86.avx512.pternlog.q.512({{.*}}, i32 192) return _mm512_ternarylogic_epi64(__A, __B, __C, _MM_TERNLOG_A & _MM_TERNLOG_B); } +TEST_CONSTEXPR(match_v8di( + _mm512_ternarylogic_epi64( + ((__m512i)((__v8di){0x1111, 0x2222, 0x3333, 0x4444, 0x5555, 0x6666, 0x7777, 0x8888})), + ((__m512i)((__v8di){0xAAAA, 0xBBBB, 0xCCCC, 0xDDDD, 0xEEEE, 0xFFFF, 0x1111, 0x2222})), + ((__m512i)((__v8di){-0x1, 0x0, -0x1, 0x0, -0x1, 0x0, -0x1, 0x0})), + (unsigned char)0xD8), // C ? B : A + 0xAAAA, 0x2222, 0xCCCC, 0x4444, 0xEEEE, 0x6666, 0x1111, 0x8888)); +TEST_CONSTEXPR(match_v8di( + _mm512_ternarylogic_epi64( + ((__m512i)((__v8di){-0x1, 0x0, -0x1, 0x0, 0xF0F0, 0xFF, -0x5555555555555556, 0x5555555555555555})), + ((__m512i)((__v8di){0x1234, 0xFFFF, 0xFF, 0xF0F, 0x3333, 0xFF00, -0x5555555555555556, -0x0F0F0F0F0F0F0F10})), + ((__m512i)((__v8di){0xFFFF, 0x1234, 0xF0F, 0xFF00, 0xF0F0, 0x3333, 0x5555555555555555, 0x0F0F0F0F0F0F0F0})), + (unsigned char)0x8F), // ~A | (B & C) + 0x1234, -0x1, 0xF, -0x1, -0xC0C1, -0x100, 0x5555555555555555, -0x5505050505050506)); +TEST_CONSTEXPR(match_v8di( + _mm512_ternarylogic_epi64( + ((__m512i)((__v8di){0x7FFFFFFFFFFFFFFF, 0x0, 0x00FF00FF00FF00FF, 0x0F0F0F0F0F0F0F0F, + 0x123456789ABCDEF0, 0x3333333333333333, 0x5555555555555555, 0x0123456789ABCDEF})), + ((__m512i)((__v8di){0x1111111111111111, 0x2222222222222222, 0xFFFFFFFF, -0x100000000, + 0x0, -0x3333333333333334, -0x0F0F0F0F0F0F0F10, -0x123456789ABCDF0})), + ((__m512i)((__v8di){0x2222222222222222, 0x1111111111111111, -0x1000000000000, 0xFFFFFFFF, + -0x1, 0x0, 0x0F0F0F0F0F0F0F0F, 0x0})), + (unsigned char)0xE0), // A & (B | C) + 0x3333333333333333, 0x0, 0x00FF000000FF00FF, 0x0F0F0F0F0F0F0F0F, + 0x123456789ABCDEF0, 0x0, 0x5555555555555555, 0x0)); __m512i test_mm512_mask_ternarylogic_epi64(__m512i __A, __mmask8 __U, __m512i __B, __m512i __C) { // CHECK-LABEL: test_mm512_mask_ternarylogic_epi64 @@ -6300,6 +6523,40 @@ __m512i test_mm512_mask_ternarylogic_epi64(__m512i __A, __mmask8 __U, __m512i __ // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}} return _mm512_mask_ternarylogic_epi64(__A, __U, __B, __C, _MM_TERNLOG_B | _MM_TERNLOG_C); } +TEST_CONSTEXPR(match_v8di( + _mm512_mask_ternarylogic_epi64( + ((__m512i)((__v8di){0x0LL, 0x1LL, 0x2LL, 0x3LL, 0x4LL, 0x5LL, 0x6LL, 0x7LL})), + (__mmask8)0xFF, + ((__m512i)((__v8di){0x1LL, 0x1LL, 0x1LL, 0x1LL, 0x1LL, 0x1LL, 0x1LL, 0x1LL})), + ((__m512i)((__v8di){0x0LL, 0x0LL, 0x0LL, 0x0LL, 0x0LL, 0x0LL, 0x0LL, 0x0LL})), + (unsigned char)0x96), + 0x1, 0x0, 0x3, 0x2, 0x5, 0x4, 0x7, 0x6)); +TEST_CONSTEXPR(match_v8di( + 
+    _mm512_mask_ternarylogic_epi64(
+        ((__m512i)((__v8di){
+            (long long)0x9FD641D41C6A70FEULL, (long long)0xB51D9082CF18D398ULL,
+            (long long)0x730E520285F4D01BULL, (long long)0x347E72CE341FD932ULL,
+            (long long)0x438F8D9BEA5D486FULL, (long long)0xFDB554A5DEEF750DULL,
+            (long long)0x0ABAA254BFFC2308ULL, (long long)0x825FE29BF1D51FC6ULL
+        })),
+        (__mmask8)0xE4,
+        ((__m512i)((__v8di){
+            (long long)0xC1779B12FA832A6EULL, (long long)0xCF6E876B587C4762ULL,
+            (long long)0x25DC09833D4ECA24ULL, (long long)0x34E55E25691BB80AULL,
+            (long long)0x9A02450CD8F20DD7ULL, (long long)0x78B9E240FB5B77A9ULL,
+            (long long)0xE1F37F76C1162596ULL, (long long)0xDCCB561738CE2941ULL
+        })),
+        ((__m512i)((__v8di){
+            (long long)0xD13840986BC8DC3CULL, (long long)0x34CDE7E8C960187EULL,
+            (long long)0x7EE068D9D111EEB8ULL, (long long)0xAD11149DE686B811ULL,
+            (long long)0x849F38BFD9AB0DFAULL, (long long)0x5C28948ED106227BULL,
+            (long long)0xFB1918D4A18E304DULL, (long long)0x4EDE6944F84AD59FULL
+        })),
+        (unsigned char)0x67),
+    (long long)0x9FD641D41C6A70FEULL, (long long)0xB51D9082CF18D398ULL,
+    (long long)0xDB3DE57EEE5F25DCULL, (long long)0x347E72CE341FD932ULL,
+    (long long)0x438F8D9BEA5D486FULL, (long long)0x26D37FDE2A5DDDD2ULL,
+    (long long)0x1EEE67AB6099DDFBULL, (long long)0xB3353F73C6A4FCFEULL));
 __m512i test_mm512_maskz_ternarylogic_epi64(__mmask8 __U, __m512i __A, __m512i __B, __m512i __C) {
   // CHECK-LABEL: test_mm512_maskz_ternarylogic_epi64
@@ -6307,6 +6564,59 @@ __m512i test_mm512_maskz_ternarylogic_epi64(__mmask8 __U, __m512i __A, __m512i _
   // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> zeroinitializer
   return _mm512_maskz_ternarylogic_epi64(__U, __A, __B, __C, ~_MM_TERNLOG_A | (_MM_TERNLOG_B ^ _MM_TERNLOG_C));
 }
+TEST_CONSTEXPR(match_v8di(
+    _mm512_maskz_ternarylogic_epi64(
+        (__mmask8)0x6D,
+        ((__m512i)((__v8di){
+            (long long)0xFFFFFFFFFFFFFFFF, (long long)0x0000000000000000,
+            (long long)0x0000FFFF0000FFFF, (long long)0x5555555555555555,
+            (long long)0x0123456789ABCDEF, (long long)0x1122334455667788,
+            (long long)0x00000000FFFFFFFF, (long long)0x0F0F0F0F0F0F0F0F
+        })),
+        ((__m512i)((__v8di){
+            (long long)0x000000000000000B, (long long)0x000000000000000C,
+            (long long)0x00000000FFFF0000, (long long)0x3333333333333333,
+            (long long)0x0FEDCBA987654321, (long long)0x1111111111111111,
+            (long long)0x7FFFFFFFFFFFFFFF, (long long)0x2222222222222222
+        })),
+        ((__m512i)((__v8di){
+            (long long)0x000000000000000C, (long long)0x000000000000000B,
+            (long long)0x00F0F0F0F0F0F0F0, (long long)0x5555555555555555,
+            (long long)0x0000000000000000, (long long)0x7FFFFFFFFFFFFFFF,
+            (long long)0x0000000000000001, (long long)0x2222222222222222
+        })),
+        (unsigned char)0x89),
+    (long long)0x0000000000000008, (long long)0x0000000000000000,
+    (long long)0xFF0F0000F0F00000, (long long)0x9999999999999999,
+    (long long)0x0000000000000000, (long long)0x9111111111111111,
+    (long long)0x8000000000000001, (long long)0x0000000000000000));
+
+TEST_CONSTEXPR(match_v8di(
+    _mm512_maskz_ternarylogic_epi64(
+        (__mmask8)0x6D,
+        ((__m512i)((__v8di){
+            (long long)0xFFFFFFFFFFFFFFFF, (long long)0x0000000000000000,
+            (long long)0x0000FFFF0000FFFF, (long long)0x5555555555555555,
+            (long long)0x0123456789ABCDEF, (long long)0x1122334455667788,
+            (long long)0x00000000FFFFFFFF, (long long)0x0F0F0F0F0F0F0F0F
+        })),
+        ((__m512i)((__v8di){
+            (long long)0x000000000000000B, (long long)0x000000000000000C,
+            (long long)0x00000000FFFF0000, (long long)0x3333333333333333,
+            (long long)0x0FEDCBA987654321, (long long)0x1111111111111111,
+            (long long)0x7FFFFFFFFFFFFFFF, (long long)0x2222222222222222
+        })),
+        ((__m512i)((__v8di){
+            (long long)0x000000000000000C, (long long)0x000000000000000B,
+            (long long)0x00F0F0F0F0F0F0F0, (long long)0x5555555555555555,
+            (long long)0x0000000000000000, (long long)0x7FFFFFFFFFFFFFFF,
+            (long long)0x0000000000000001, (long long)0x2222222222222222
+        })),
+        (unsigned char)0x29),
+    (long long)0x0000000000000004, (long long)0x0000000000000000,
+    (long long)0xFF0FF0F0F0F0F0F0, (long long)0xCCCCCCCCCCCCCCCC,
+    (long long)0x0000000000000000, (long long)0x8033225544776699,
+    (long long)0x8000000000000000, (long long)0x0000000000000000));
 __m512 test_mm512_shuffle_f32x4(__m512 __A, __m512 __B) {
   // CHECK-LABEL: test_mm512_shuffle_f32x4
diff --git a/clang/test/CodeGen/X86/avx512fp16-builtins.c b/clang/test/CodeGen/X86/avx512fp16-builtins.c
index 37443d5..dbf89b3 100644
--- a/clang/test/CodeGen/X86/avx512fp16-builtins.c
+++ b/clang/test/CodeGen/X86/avx512fp16-builtins.c
@@ -17,6 +17,7 @@ _Float16 test_mm512_cvtsh_h(__m512h __A) {
   // CHECK: extractelement <32 x half> %{{.*}}, i32 0
   return _mm512_cvtsh_h(__A);
 }
+TEST_CONSTEXPR(_mm512_cvtsh_h((__m512h){-32.0, 31.0, -30.0, 29.0, -28.0, 27.0, -26.0, 25.0, -24.0, 23.0, -22.0, 21.0, -20.0, 19.0, -18.0, 17.0, -16.0, 15.0, -14.0, 13.0, -12.0, 11.0, -10.0, 9.0, -8.0, 7.0, -6.0, 5.0, -4.0, 3.0, -2.0, 1.0}) == -32.0);
 
 __m128h test_mm_setzero_ph(void) {
   // CHECK-LABEL: test_mm_setzero_ph
diff --git a/clang/test/CodeGen/X86/avx512vl-builtins.c b/clang/test/CodeGen/X86/avx512vl-builtins.c
index 8800623..51385d5 100644
--- a/clang/test/CodeGen/X86/avx512vl-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vl-builtins.c
@@ -8359,6 +8359,27 @@ __m128i test_mm_ternarylogic_epi32(__m128i __A, __m128i __B, __m128i __C) {
   // CHECK: @llvm.x86.avx512.pternlog.d.128
   return _mm_ternarylogic_epi32(__A, __B, __C, 4);
 }
+TEST_CONSTEXPR(match_v4si(
+    _mm_ternarylogic_epi32(
+        ((__m128i)((__v4si){(int)0x7FFFFFFF, (int)0x80000000, (int)0xAAAAAAAA, 0x00000000})),
+        ((__m128i)((__v4si){0x00000000, (int)0xFFFFFFFF, 0x12345678, (int)0xFFFFFFFF})),
+        ((__m128i)((__v4si){(int)0xCAFEBABE, 0x0F0F0F0F, (int)0xFFFFFFFF, 0x00000000})),
+        (unsigned char)0xCA), /* B ? (A | C) : (C & ~A) */
+    (int)0x80000000, (int)0x8F0F0F0F, 0x5775577D, 0x00000000));
+TEST_CONSTEXPR(match_v4si(
+    _mm_ternarylogic_epi32(
+        ((__m128i)((__v4si){0x12345678, (int)0x80000000, 0x00000000, (int)0xAAAAAAAA})),
+        ((__m128i)((__v4si){0x0000FFFF, 0x7FFFFFFF, 0x55555555, 0x00000000})),
+        ((__m128i)((__v4si){(int)0xF0F0F0F0, 0x00000001, 0x0F0F0F0F, 0x33333333})),
+        (unsigned char)0xFE), /* A | B | C */
+    (int)0xF2F4FFFF, (int)0xFFFFFFFF, 0x5F5F5F5F, (int)0xBBBBBBBB));
+TEST_CONSTEXPR(match_v4si(
+    _mm_ternarylogic_epi32(
+        ((__m128i)((__v4si){(int)0xFFFFFFFF, 0x12345678, (int)0x80000000, 0x0F0F0F0F})),
+        ((__m128i)((__v4si){0x00FF00FF, (int)0xFFFFFFFF, 0x7FFFFFFF, (int)0xF0F0F0F0})),
+        ((__m128i)((__v4si){0x0F0F0F0F, 0x00FF00FF, (int)0xFFFFFFFF, (int)0xFFFFFFFF})),
+        (unsigned char)0x80), /* A & B & C */
+    0x000F000F, 0x00340078, 0x00000000, 0x00000000));
 __m128i test_mm_mask_ternarylogic_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
   // CHECK-LABEL: test_mm_mask_ternarylogic_epi32
@@ -8366,6 +8387,30 @@ __m128i test_mm_mask_ternarylogic_epi32(__m128i __A, __mmask8 __U, __m128i __B,
   // CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
   return _mm_mask_ternarylogic_epi32(__A, __U, __B, __C, 4);
 }
+TEST_CONSTEXPR(match_v4si(
+    _mm_mask_ternarylogic_epi32(
+        ((__m128i)((__v4si){-0x1, 0x0, -0x1, 0x0})),
+        (__mmask8)0x03,
+        ((__m128i)((__v4si){0xB, 0xB, 0xB, 0xB})),
+        ((__m128i)((__v4si){0xC, 0xC, 0xC, 0xC})),
+        (unsigned char)0xCA), // A ? B : C
+    0xB, 0xC, -0x1, 0x0));
+TEST_CONSTEXPR(match_v4si(
+    _mm_mask_ternarylogic_epi32(
+        ((__m128i)((__v4si){0x9, 0x9, 0x9, 0x9})),
+        (__mmask8)0x0C,
+        ((__m128i)((__v4si){0x4, 0x4, 0x4, 0x4})),
+        ((__m128i)((__v4si){0x2, 0x2, 0x2, 0x2})),
+        (unsigned char)0xFE), // A | B | C
+    0x9, 0x9, 0xF, 0xF));
+TEST_CONSTEXPR(match_v4si(
+    _mm_mask_ternarylogic_epi32(
+        ((__m128i)((__v4si){0x9, 0x9, 0x9, 0x9})),
+        (__mmask8)0x05,
+        ((__m128i)((__v4si){0x4, 0x4, 0x4, 0x4})),
+        ((__m128i)((__v4si){0x2, 0x2, 0x2, 0x2})),
+        (unsigned char)0x80), // A & B & C
+    0x0, 0x9, 0x0, 0x9));
 __m128i test_mm_maskz_ternarylogic_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
   // CHECK-LABEL: test_mm_maskz_ternarylogic_epi32
@@ -8373,12 +8418,57 @@ __m128i test_mm_maskz_ternarylogic_epi32(__mmask8 __U, __m128i __A, __m128i __B,
   // CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> zeroinitializer
   return _mm_maskz_ternarylogic_epi32(__U, __A, __B, __C, 4);
 }
+TEST_CONSTEXPR(match_v4si(
+    _mm_maskz_ternarylogic_epi32(
+        (__mmask8)0x0B,
+        ((__m128i)((__v4si){(int)0xDEADBEEF, 0, (int)0xFFFFFFFF, 0x13579BDF})),
+        ((__m128i)((__v4si){(int)0xFFFFFFFF, 0, (int)0xFFFFFFFF, 0})),
+        ((__m128i)((__v4si){(int)0xCAFEBABE, (int)0xFFFFFFFF, 0, 0x2468ACE0})),
+        (unsigned char)0xE2), // B ? A : C
+    (int)0xDEADBEEF, (int)0xFFFFFFFF, 0, 0x2468ACE0));
+TEST_CONSTEXPR(match_v4si(
+    _mm_maskz_ternarylogic_epi32(
+        (__mmask8)0x0C,
+        ((__m128i)((__v4si){0, (int)0xFFFFFFFF, (int)0xAAAAAAAA, 0x55555555})),
+        ((__m128i)((__v4si){(int)0xFFFFFFFF, 0, (int)0xFFFFFFFF, (int)0xFFFFFFFF})),
+        ((__m128i)((__v4si){(int)0xF0F0F0F0, 0, 0, (int)0xFFFFFFFF})),
+        (unsigned char)0x7F), // ~(A & B) | ~(B & C)
+    0, 0, (int)0xFFFFFFFF, (int)0xAAAAAAAA));
+TEST_CONSTEXPR(match_v4si(
+    _mm_maskz_ternarylogic_epi32(
+        (__mmask8)0x05,
+        ((__m128i)((__v4si){(int)0xFFFFFFFF, 0, 0x12345678, 0})),
+        ((__m128i)((__v4si){0, 0, 0x0000FFFF, (int)0xFFFFFFFF})),
+        ((__m128i)((__v4si){0, 0, 0x0000000F, 0})),
+        (unsigned char)0xBF), // ~A | ~B | C
+    (int)0xFFFFFFFF, 0, (int)0xFFFFA98F, 0));
 __m256i test_mm256_ternarylogic_epi32(__m256i __A, __m256i __B, __m256i __C) {
   // CHECK-LABEL: test_mm256_ternarylogic_epi32
   // CHECK: @llvm.x86.avx512.pternlog.d.256
   return _mm256_ternarylogic_epi32(__A, __B, __C, 4);
 }
+TEST_CONSTEXPR(match_v8si(
+    _mm256_ternarylogic_epi32(
+        ((__m256i)((__v8si){0x12345678, 0x00000000, (int)0xFFFFFFFF, 0x7FFFFFFF, (int)0x80000000, 0x00FF00FF, (int)0xF0F0F0F0, (int)0xAAAAAAAA})),
+        ((__m256i)((__v8si){(int)0xDEADBEEF, 0x11111111, 0x22222222, 0x33333333, 0x44444444, 0x55555555, 0x66666666, 0x77777777})),
+        ((__m256i)((__v8si){(int)0xCAFEBABE, (int)0x88888888, (int)0x99999999, (int)0xAAAAAAAA, (int)0xBBBBBBBB, (int)0xCCCCCCCC, (int)0xDDDDDDDD, (int)0xFFFFFFFF})),
+        (unsigned char)0xF0), /* A */
+    0x12345678, 0x00000000, (int)0xFFFFFFFF, 0x7FFFFFFF, (int)0x80000000, 0x00FF00FF, (int)0xF0F0F0F0, (int)0xAAAAAAAA));
+TEST_CONSTEXPR(match_v8si(
+    _mm256_ternarylogic_epi32(
+        ((__m256i)((__v8si){0x12345678, 0x00000000, (int)0xFFFFFFFF, 0x7FFFFFFF, (int)0x80000000, 0x00FF00FF, (int)0xF0F0F0F0, (int)0xAAAAAAAA})),
+        ((__m256i)((__v8si){(int)0xAAAAAAAA, (int)0xBBBBBBBB, (int)0xCCCCCCCC, (int)0xDDDDDDDD, (int)0xEEEEEEEE, (int)0xFFFFFFFF, 0x00000000, 0x11111111})),
+        ((__m256i)((__v8si){0x22222222, 0x33333333, 0x44444444, 0x55555555, 0x66666666, 0x77777777, (int)0x88888888, (int)0x99999999})),
+        (unsigned char)0x0F), /* ~A */
+    (int)0xEDCBA987, (int)0xFFFFFFFF, 0x00000000, (int)0x80000000, 0x7FFFFFFF, (int)0xFF00FF00, 0x0F0F0F0F, 0x55555555));
+TEST_CONSTEXPR(match_v8si(
+    _mm256_ternarylogic_epi32(
+        ((__m256i)((__v8si){0x0F0F0F0F, (int)0xAAAAAAAA, 0x12345678, 0x00000000, (int)0xFFFFFFFF, 0x13579BDF, (int)0x80000000, 0x7FFFFFFF})),
+        ((__m256i)((__v8si){(int)0xF0F0F0F0, 0x55555555, 0x11111111, (int)0xFFFFFFFF, 0x00000000, 0x02468ACE, 0x7FFFFFFF, (int)0x80000000})),
+        ((__m256i)((__v8si){(int)0xAAAAAAAA, (int)0xAAAAAAAA, (int)0xAAAAAAAA, (int)0xAAAAAAAA, (int)0xAAAAAAAA, (int)0xAAAAAAAA, (int)0xAAAAAAAA, (int)0xAAAAAAAA})),
+        (unsigned char)0x3C), /* A ^ B */
+    (int)0xFFFFFFFF, (int)0xFFFFFFFF, 0x03254769, (int)0xFFFFFFFF, (int)0xFFFFFFFF, 0x11111111, (int)0xFFFFFFFF, (int)0xFFFFFFFF));
 __m256i test_mm256_mask_ternarylogic_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
   // CHECK-LABEL: test_mm256_mask_ternarylogic_epi32
@@ -8386,6 +8476,30 @@ __m256i test_mm256_mask_ternarylogic_epi32(__m256i __A, __mmask8 __U, __m256i __
   // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
   return _mm256_mask_ternarylogic_epi32(__A, __U, __B, __C, 4);
 }
+TEST_CONSTEXPR(match_v8si(
+    _mm256_mask_ternarylogic_epi32(
+        ((__m256i)((__v8si){(int)0xFFFFFFFF, 0x00000000, 0x12345678, (int)0xAAAAAAAA, 0x7FFFFFFF, (int)0x80000000, 0x13579BDF, 0x2468ACE0})),
+        (__mmask8)0xA5,
+        ((__m256i)((__v8si){(int)0xFFFFFFFF, 0x00000000, (int)0xFFFFFFFF, 0x00000000, (int)0xFFFFFFFF, 0x00000000, (int)0xFFFFFFFF, 0x00000000})),
+        ((__m256i)((__v8si){0x00000000, (int)0xFFFFFFFF, 0x55555555, 0x33333333, (int)0x89ABCDEF, 0x00000000, (int)0xFFFFFFFF, 0x11111111})),
+        (unsigned char)0xE2), /* B ? A : C */
+    (int)0xFFFFFFFF, 0x00000000, 0x12345678, (int)0xAAAAAAAA, 0x7FFFFFFF, 0x00000000, 0x13579BDF, 0x11111111));
+TEST_CONSTEXPR(match_v8si(
+    _mm256_mask_ternarylogic_epi32(
+        ((__m256i)((__v8si){0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F})),
+        (__mmask8)0xFF,
+        ((__m256i)((__v8si){0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF})),
+        ((__m256i)((__v8si){0x33333333, 0x33333333, 0x33333333, 0x33333333, 0x33333333, 0x33333333, 0x33333333, 0x33333333})),
+        (unsigned char)0x96), /* A ^ B ^ C */
+    0x3CC33CC3, 0x3CC33CC3, 0x3CC33CC3, 0x3CC33CC3, 0x3CC33CC3, 0x3CC33CC3, 0x3CC33CC3, 0x3CC33CC3));
+TEST_CONSTEXPR(match_v8si(
+    _mm256_mask_ternarylogic_epi32(
+        ((__m256i)((__v8si){(int)0xFFFFFFFF, 0x00000000, (int)0xFFFFFFFF, 0x12345678, (int)0xAAAAAAAA, 0x55555555, (int)0x80000000, 0x7FFFFFFF})),
+        (__mmask8)0x5A,
+        ((__m256i)((__v8si){0x00000000, (int)0xFFFFFFFF, 0x11111111, (int)0xFFFFFFFF, 0x55555555, (int)0xAAAAAAAA, (int)0x80000000, 0x7FFFFFFF})),
+        ((__m256i)((__v8si){0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000})),
+        (unsigned char)0xC0), /* A & B */
+    (int)0xFFFFFFFF, 0x00000000, (int)0xFFFFFFFF, 0x12345678, 0x00000000, 0x55555555, (int)0x80000000, 0x7FFFFFFF));
 __m256i test_mm256_maskz_ternarylogic_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
   // CHECK-LABEL: test_mm256_maskz_ternarylogic_epi32
@@ -8393,12 +8507,60 @@ __m256i test_mm256_maskz_ternarylogic_epi32(__mmask8 __U, __m256i __A, __m256i _
   // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> zeroinitializer
   return _mm256_maskz_ternarylogic_epi32(__U, __A, __B, __C, 4);
 }
+TEST_CONSTEXPR(match_v8si(
+    _mm256_maskz_ternarylogic_epi32(
+        (__mmask8)0x6D,
+        ((__m256i)((__v8si){(int)-1, 0, (int)-1, 0, (int)-1, 0, (int)-1, 0})),
+        ((__m256i)((__v8si){0xB, 0xB, 0xB, 0xB, 0xB, 0xB, 0xB, 0xB})),
+        ((__m256i)((__v8si){0xC, 0xC, 0xC, 0xC, 0xC, 0xC, 0xC, 0xC})),
+        (unsigned char)0x30), /* A & ~B */
+    (int)0xFFFFFFF4, 0, (int)0xFFFFFFF4, 0, 0, 0, (int)0xFFFFFFF4, 0));
+TEST_CONSTEXPR(match_v8si(
+    _mm256_maskz_ternarylogic_epi32(
+        (__mmask8)0x90,
+        ((__m256i)((__v8si){0x9, 0x9, 0x9, 0x9, 0x9, 0x9, 0x9, 0x9})),
+        ((__m256i)((__v8si){0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4})),
+        ((__m256i)((__v8si){0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2})),
+        (unsigned char)0x44), /* B & ~C */
+    0, 0, 0, 0, 0x4, 0, 0, 0x4));
+TEST_CONSTEXPR(match_v8si(
+    _mm256_maskz_ternarylogic_epi32(
+        (__mmask8)0x0F,
+        ((__m256i)((__v8si){0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3})),
+        ((__m256i)((__v8si){0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1})),
+        ((__m256i)((__v8si){0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2})),
+        (unsigned char)0x28), /* (A ^ B) & C */
+    0x2, 0x2, 0x2, 0x2, 0, 0, 0, 0));
 __m128i test_mm_ternarylogic_epi64(__m128i __A, __m128i __B, __m128i __C) {
   // CHECK-LABEL: test_mm_ternarylogic_epi64
   // CHECK: @llvm.x86.avx512.pternlog.q.128
   return _mm_ternarylogic_epi64(__A, __B, __C, 4);
 }
+TEST_CONSTEXPR(match_v2di(
+    _mm_ternarylogic_epi64(
+        ((__m128i)((__v2di){ (long long)0xBB91433A6AA79987ULL, (long long)0xD1F6F86C029A7245ULL })),
+        ((__m128i)((__v2di){ (long long)0xCD8778E7D340BBCDULL, (long long)0xDAEA58BA4C73A942ULL })),
+        ((__m128i)((__v2di){ (long long)0xEE8971105E503A67ULL, (long long)0x452EC40A3193CA54ULL })),
+        (unsigned char)0x77), // F = ~(B & C)
+    (long long)0x337E8FFFADBFC5BAULL,
+    (long long)0xBFD5BFF5FFEC77BFULL));
+TEST_CONSTEXPR(match_v2di(
+    _mm_ternarylogic_epi64(
+        ((__m128i)((__v2di){ (long long)0x6FACAA5090E5E945ULL, (long long)0x5F811CB929645F8BULL })),
+        ((__m128i)((__v2di){ (long long)0xDFC9E3B11FCFF454ULL, (long long)0x42D6CB5C6ED4E94BULL })),
+        ((__m128i)((__v2di){ (long long)0xA091250E8FE46024ULL, (long long)0x9C9CEA0C2CA1C789ULL })),
+        (unsigned char)0xDD), // F = (~C) | B
+    (long long)0xDFEFFBF17FDFFFDFULL,
+    (long long)0x63F7DFFFFFDEF97FULL));
+TEST_CONSTEXPR(match_v2di(
+    _mm_ternarylogic_epi64(
+        ((__m128i)((__v2di){ (long long)0x2FD2B7A48D9FE5B9ULL, (long long)0xBCF74D7A5ADAD121ULL })),
+        ((__m128i)((__v2di){ (long long)0xBB9D58E4F543BBCFULL, (long long)0x87F26AEE175F0CD2ULL })),
+        ((__m128i)((__v2di){ (long long)0xBC428D42FA882692ULL, (long long)0x95C5FB986980A81FULL })),
+        (unsigned char)0x22), // F = C & ~B
+    (long long)0x044285020A880410ULL,
+    (long long)0x100591106880A00DULL));
 __m128i test_mm_mask_ternarylogic_epi64(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
   // CHECK-LABEL: test_mm_mask_ternarylogic_epi64
@@ -8406,6 +8568,40 @@ __m128i test_mm_mask_ternarylogic_epi64(__m128i __A, __mmask8 __U, __m128i __B,
   // CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
   return _mm_mask_ternarylogic_epi64(__A, __U, __B, __C, 4);
 }
+TEST_CONSTEXPR(match_v2di(
+    _mm_mask_ternarylogic_epi64(
+        ((__m128i)((__v2di){(long long)0xF4C3B00C0D15EA5ELL, (long long)0x0123456789ABCDE0LL})),
+        (__mmask8)0x9D,
+        ((__m128i)((__v2di){(long long)0x9A7F3C2155EE00DDLL, (long long)0xDEADBEEFCAFEBABELL})),
+        ((__m128i)((__v2di){(long long)0x00F0F0F0F0F0F0F0LL, (long long)0x13579BDF2468ACE0LL})),
+        (unsigned char)0xFF), // All 1s
+    (long long)-1,
+    (long long)0x0123456789ABCDE0LL));
+TEST_CONSTEXPR(match_v2di(
+    _mm_mask_ternarylogic_epi64(
+        ((__m128i)((__v2di){ (long long)0x3A7C19E54B20D8A1LL, (long long)0x4F12B39D0C85E762LL })),
+        (__mmask8)0xD2,
+        ((__m128i)((__v2di){ (long long)0x6D93A0F217C54E3BLL, (long long)0x24E1C7A95B08D6F2LL })),
+        ((__m128i)((__v2di){ (long long)0x5A0C3E19D472B8F5LL, (long long)0x0187D3B2C9E4056ALL })),
+        (unsigned char)0x00),
+    (long long)0x3A7C19E54B20D8A1LL,
+    (long long)0x0LL)); // All 0s
+TEST_CONSTEXPR(match_v2di(
+    _mm_mask_ternarylogic_epi64(
+        ((__m128i)((__v2di){
+            (long long)0xA3F10B6C7D8294E1ULL, (long long)0x19D4E7350AB2C98FLL
+        })),
+        (__mmask8)0xB5,
+        ((__m128i)((__v2di){
+            (long long)0x5C2E9A10F4B7D863LL, (long long)0x9B7E1D2C3A4F5E60LL
+        })),
+        ((__m128i)((__v2di){
+            (long long)0x2A6D3F81C9E047B5LL, (long long)0x7F0A1C3E5D2B6490LL
+        })),
+        (unsigned char)0x55), // ~C
+    (long long)0xD592C07E361FB84AULL,
+    (long long)0x19D4E7350AB2C98FLL
+));
 __m128i test_mm_maskz_ternarylogic_epi64(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
   // CHECK-LABEL: test_mm_maskz_ternarylogic_epi64
@@ -8413,12 +8609,72 @@ __m128i test_mm_maskz_ternarylogic_epi64(__mmask8 __U, __m128i __A, __m128i __B,
   // CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> zeroinitializer
   return _mm_maskz_ternarylogic_epi64(__U, __A, __B, __C, 4);
 }
+TEST_CONSTEXPR(match_v2di(
+    _mm_maskz_ternarylogic_epi64(
+        (__mmask8)0xA9,
+        ((__m128i)((__v2di){
+            (long long)0x8F3A5C7E21D4B690ULL, (long long)0x5AD02CE19B7F46A3ULL
+        })),
+        ((__m128i)((__v2di){
+            (long long)0xC19E04B2A7D35F68ULL, (long long)0x2F7B93C4E1A05D76ULL
+        })),
+        ((__m128i)((__v2di){
+            (long long)0x7A0C1D2E3F405162ULL, (long long)0xD4E5F60718293A4BULL
+        })),
+        (unsigned char)0xD2), // F = C ? ~(A ^ B) : A
+    (long long)0xB53A457239D4B692ULL,
+    (long long)0x0ULL));
+TEST_CONSTEXPR(match_v2di(
+    _mm_maskz_ternarylogic_epi64(
+        (__mmask8)0xB6,
+        ((__m128i)((__v2di){
+            (long long)0x83C1D2E3F4051627ULL, (long long)0x5A0B1C2D3E4F6071ULL
+        })),
+        ((__m128i)((__v2di){
+            (long long)0x9E8D7C6B5A493827ULL, (long long)0x13579BDF2468ACE0ULL
+        })),
+        ((__m128i)((__v2di){
+            (long long)0x02468ACE13579BDFULL, (long long)0xFEDCBA9876543210ULL
+        })),
+        (unsigned char)0xFE), // F = A | B | C
+    (long long)0x0ULL,
+    (long long)0xFFDFBFFF7E7FFEF1ULL));
+TEST_CONSTEXPR(match_v2di(
+    _mm_maskz_ternarylogic_epi64(
+        (__mmask8)0xA5,
+        ((__m128i)((__v2di){
+            (long long)0x1C80317FA3B1799DULL, (long long)0xBDD640FB06671AD1ULL
+        })),
+        ((__m128i)((__v2di){
+            (long long)0x3EB13B9046685257ULL, (long long)0x23B8C1E9392456DEULL
+        })),
+        ((__m128i)((__v2di){
+            (long long)0x1A3D1FA7BC8960A9ULL, (long long)0xBD9C66B3AD3C2D6DULL
+        })),
+        (unsigned char)0x80), // F = A & B & C
+    (long long)0x1800110000004001ULL,
+    (long long)0x0ULL
+));
 __m256i test_mm256_ternarylogic_epi64(__m256i __A, __m256i __B, __m256i __C) {
   // CHECK-LABEL: test_mm256_ternarylogic_epi64
   // CHECK: @llvm.x86.avx512.pternlog.q.256
   return _mm256_ternarylogic_epi64(__A, __B, __C, 4);
 }
+TEST_CONSTEXPR(match_v4di(
+    _mm256_ternarylogic_epi64(
+        ((__m256i)((__v4di){-0x1, 0x0, -0x1, 0x0})),
+        ((__m256i)((__v4di){0xB, 0xB, 0xB, 0xB})),
+        ((__m256i)((__v4di){0xC, 0xC, 0xC, 0xC})),
+        (unsigned char)0x94),
+    (long long)-0x8, (long long)0x3, (long long)-0x8, (long long)0x3));
+TEST_CONSTEXPR(match_v4di(
+    _mm256_ternarylogic_epi64(
+        ((__m256i)((__v4di){0x9, 0x9, 0x9, 0x9})),
+        ((__m256i)((__v4di){0x4, 0x4, 0x4, 0x4})),
+        ((__m256i)((__v4di){0x2, 0x2, 0x2, 0x2})),
+        (unsigned char)0x76),
+    (long long)0xF, (long long)0xF, (long long)0xF, (long long)0xF));
 __m256i test_mm256_mask_ternarylogic_epi64(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
   // CHECK-LABEL: test_mm256_mask_ternarylogic_epi64
@@ -8426,6 +8682,46 @@ __m256i test_mm256_mask_ternarylogic_epi64(__m256i __A, __mmask8 __U, __m256i __
   // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
   return _mm256_mask_ternarylogic_epi64(__A, __U, __B, __C, 4);
 }
+TEST_CONSTEXPR(match_v4di(
+    _mm256_mask_ternarylogic_epi64(
+        ((__m256i)((__v4di){
+            (long long)0x0123456789ABCDEFULL, (long long)0x0F0F0F0F0F0F0F0FULL,
+            (long long)0xAAAAAAAAAAAAAAAALL, (long long)0x13579BDF02468ACEULL
+        })),
+        (__mmask8)0x09,
+        ((__m256i)((__v4di){
+            (long long)0x1111111111111111ULL, (long long)0x2222222222222222ULL,
+            (long long)0x3333333333333333ULL, (long long)0x4444444444444444ULL
+        })),
+        ((__m256i)((__v4di){
+            (long long)0x5555555555555555ULL, (long long)0x6666666666666666ULL,
+            (long long)0x7777777777777777ULL, (long long)0x8888888888888888ULL
+        })),
+        (unsigned char)0x12),
+    (long long)0x44660022CCEE88AAULL,
+    (long long)0x0F0F0F0F0F0F0F0FULL,
+    (long long)0xAAAAAAAAAAAAAAAALL,
+    (long long)0x9B9B13138A8A0202ULL));
+TEST_CONSTEXPR(match_v4di(
+    _mm256_mask_ternarylogic_epi64(
+        ((__m256i)((__v4di){
+            (long long)0xDEADBEEFDEADBEEFULL, (long long)0xCAFEBABECAFEBABEULL,
+            (long long)0xF00DFACEF00DFACEULL, (long long)0x0123456789ABCDEFULL
+        })),
+        (__mmask8)0x06,
+        ((__m256i)((__v4di){
+            (long long)0x0000000000000000ULL, (long long)0xFFFFFFFFFFFFFFFFULL,
+            (long long)0x13579BDF13579BDFULL, (long long)0x0AAAAAAAAAAAAAAULL
+        })),
+        ((__m256i)((__v4di){
+            (long long)0x1111111111111111ULL, (long long)0x2222222222222222ULL,
+            (long long)0x3333333333333333ULL, (long long)0x4444444444444444ULL
+        })),
+        (unsigned char)0x23),
+    (long long)0xDEADBEEFDEADBEEFULL,
+    (long long)0x0000000000000000ULL,
+    (long long)0x2CA024202CA02420ULL,
+    (long long)0x0123456789ABCDEFULL));
 __m256i test_mm256_maskz_ternarylogic_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
   // CHECK-LABEL: test_mm256_maskz_ternarylogic_epi64
@@ -8433,6 +8729,41 @@ __m256i test_mm256_maskz_ternarylogic_epi64(__mmask8 __U, __m256i __A, __m256i _
   // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> zeroinitializer
   return _mm256_maskz_ternarylogic_epi64(__U, __A, __B, __C, 4);
 }
+TEST_CONSTEXPR(match_v4di(
+    _mm256_maskz_ternarylogic_epi64(
+        (__mmask8)0x05,
+        ((__m256i)((__v4di){
+            (long long)0x1, (long long)0x2, (long long)0x0, (long long)0x7
+        })),
+        ((__m256i)((__v4di){
+            (long long)0x0, (long long)0x3, (long long)0x4, (long long)0x0
+        })),
+        ((__m256i)((__v4di){
+            (long long)0x0, (long long)0x5, (long long)0x0, (long long)0x1
+        })),
+        (unsigned char)0xFE),
+    (long long)0x1,
+    (long long)0x0,
+    (long long)0x4,
+    (long long)0x0));
+TEST_CONSTEXPR(match_v4di(
+    _mm256_maskz_ternarylogic_epi64(
+        (__mmask8)0x0A,
+        ((__m256i)((__v4di){
+            (long long)0x1, (long long)0x0, (long long)0x2, (long long)0x1
+        })),
+        ((__m256i)((__v4di){
+            (long long)0x0, (long long)0x1, (long long)0x0, (long long)0x0
+        })),
+        ((__m256i)((__v4di){
+            (long long)0x0, (long long)0x0, (long long)0x4, (long long)0x1
+        })),
+        (unsigned char)0xED),
+    (long long)0x0,
+    (long long)-0x1,
+    (long long)0x0,
+    (long long)-0x1));
+
 __m256 test_mm256_shuffle_f32x4(__m256 __A, __m256 __B) {
   // CHECK-LABEL: test_mm256_shuffle_f32x4
   // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
@@ -9272,6 +9603,8 @@ __m128i test_mm_cvtepi32_epi8(__m128i __A) {
   return _mm_cvtepi32_epi8(__A);
 }
 
+TEST_CONSTEXPR(match_v16qi(_mm_cvtepi32_epi8((__m128i)(__v4si){1, 2, 3, 4}), 1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+
 __m128i test_mm_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
   // CHECK-LABEL: test_mm_mask_cvtepi32_epi8
   // CHECK: @llvm.x86.avx512.mask.pmov.db.128
@@ -9297,6 +9630,8 @@ __m128i test_mm256_cvtepi32_epi8(__m256i __A) {
   return _mm256_cvtepi32_epi8(__A);
 }
 
+TEST_CONSTEXPR(match_v16qi(_mm256_cvtepi32_epi8((__m256i)(__v8si){1, 2, 3, 4, 5, 6, 7, 8}), 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0));
+
 __m128i test_mm256_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
   // CHECK-LABEL: test_mm256_mask_cvtepi32_epi8
   // CHECK: @llvm.x86.avx512.mask.pmov.db.256
@@ -9322,6 +9657,8 @@ __m128i test_mm_cvtepi32_epi16(__m128i __A) {
   return _mm_cvtepi32_epi16(__A);
 }
 
+TEST_CONSTEXPR(match_v8hi(_mm_cvtepi32_epi16((__m128i)(__v4si){1, 2, 3, 4}), 1, 2, 3, 4, 0, 0, 0, 0));
+
 __m128i test_mm_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
   // CHECK-LABEL: test_mm_mask_cvtepi32_epi16
   // CHECK: @llvm.x86.avx512.mask.pmov.dw.128
@@ -9346,6 +9683,8 @@ __m128i test_mm256_cvtepi32_epi16(__m256i __A) {
   return _mm256_cvtepi32_epi16(__A);
 }
 
+TEST_CONSTEXPR(match_v8hi(_mm256_cvtepi32_epi16((__m256i)(__v8si){1, 2, 3, 4, 5, 6, 7, 8}), 1, 2, 3, 4, 5, 6, 7, 8));
+
 __m128i test_mm256_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
   // CHECK-LABEL: test_mm256_mask_cvtepi32_epi16
   // CHECK: @llvm.x86.avx512.mask.pmov.dw.256
@@ -9371,6 +9710,8 @@ __m128i test_mm_cvtepi64_epi8(__m128i __A) {
   return _mm_cvtepi64_epi8(__A);
 }
 
+TEST_CONSTEXPR(match_v16qi(_mm_cvtepi64_epi8((__m128i)(__v2di){1, 2}), 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+
 __m128i test_mm_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
   // CHECK-LABEL: test_mm_mask_cvtepi64_epi8
   // CHECK: @llvm.x86.avx512.mask.pmov.qb.128
@@ -9396,6 +9737,8 @@ __m128i test_mm256_cvtepi64_epi8(__m256i __A) {
   return _mm256_cvtepi64_epi8(__A);
 }
 
+TEST_CONSTEXPR(match_v16qi(_mm256_cvtepi64_epi8((__m256i)(__v4di){1, 2, 3, 4}), 1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+
 __m128i test_mm256_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A) {
   // CHECK-LABEL: test_mm256_mask_cvtepi64_epi8
   // CHECK: @llvm.x86.avx512.mask.pmov.qb.256
@@ -9421,6 +9764,8 @@ __m128i test_mm_cvtepi64_epi32(__m128i __A) {
   return _mm_cvtepi64_epi32(__A);
 }
 
+TEST_CONSTEXPR(match_v4si(_mm_cvtepi64_epi32((__m128i)(__v2di){1, 2}), 1, 2, 0, 0));
+
 __m128i test_mm_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A) {
   // CHECK-LABEL: test_mm_mask_cvtepi64_epi32
   // CHECK: @llvm.x86.avx512.mask.pmov.qd.128
@@ -9445,6 +9790,8 @@ __m128i test_mm256_cvtepi64_epi32(__m256i __A) {
   return _mm256_cvtepi64_epi32(__A);
 }
 
+TEST_CONSTEXPR(match_v4si(_mm256_cvtepi64_epi32((__m256i)(__v4di){1, 2, 3, 4}), 1, 2, 3, 4));
+
 __m128i test_mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) {
   // CHECK-LABEL: test_mm256_mask_cvtepi64_epi32
   // CHECK: trunc <4 x i64> %{{.*}} to <4 x i32>
@@ -9452,6 +9799,8 @@ __m128i test_mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) {
   return _mm256_mask_cvtepi64_epi32(__O, __M, __A);
 }
 
+TEST_CONSTEXPR(match_v4si(_mm256_mask_cvtepi64_epi32(_mm_set1_epi32(-777), 0xA, (__m256i)(__v4di){1, -2, 3, -4}), -777, -2, -777, -4));
+
 __m128i test_mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A) {
   // CHECK-LABEL: test_mm256_maskz_cvtepi64_epi32
   // CHECK: trunc <4 x i64> %{{.*}} to <4 x i32>
@@ -9459,6 +9808,8 @@ __m128i test_mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A) {
   return _mm256_maskz_cvtepi64_epi32(__M, __A);
 }
 
+TEST_CONSTEXPR(match_v4si(_mm256_maskz_cvtepi64_epi32(0xA, (__m256i)(__v4di){1, -2, 3, -4}), 0, -2, 0, -4));
+
 void test_mm256_mask_cvtepi64_storeu_epi32(void * __P, __mmask8 __M, __m256i __A) {
   // CHECK-LABEL: test_mm256_mask_cvtepi64_storeu_epi32
   // CHECK: @llvm.x86.avx512.mask.pmov.qd.mem.256
@@ -9472,6 +9823,8 @@ __m128i test_mm_cvtepi64_epi16(__m128i __A) {
   return _mm_cvtepi64_epi16(__A);
 }
 
+TEST_CONSTEXPR(match_v8hi(_mm_cvtepi64_epi16((__m128i)(__v2di){1, 2}), 1, 2, 0, 0, 0, 0, 0, 0));
+
 __m128i test_mm_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
   // CHECK-LABEL: test_mm_mask_cvtepi64_epi16
   // CHECK: @llvm.x86.avx512.mask.pmov.qw.128
@@ -9497,6 +9850,8 @@ __m128i test_mm256_cvtepi64_epi16(__m256i __A) {
   return _mm256_cvtepi64_epi16(__A);
 }
 
+TEST_CONSTEXPR(match_v8hi(_mm256_cvtepi64_epi16((__m256i)(__v4di){1, 2, 3, 4}), 1, 2, 3, 4, 0, 0, 0, 0));
+
 __m128i test_mm256_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A) {
   // CHECK-LABEL: test_mm256_mask_cvtepi64_epi16
   // CHECK: @llvm.x86.avx512.mask.pmov.qw.256
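[Aside on the ternarylogic tests above, not part of the patch: each imm8 is the 8-entry truth table of the three sources, indexed by (a_bit << 2) | (b_bit << 1) | c_bit. A minimal scalar reference model in C, with an illustrative function name:]

#include <stdint.h>

// Reference model of vpternlog on one 32-bit lane: for every bit position,
// the three source bits form an index into the imm8 truth table.
static uint32_t ternlog32(uint32_t a, uint32_t b, uint32_t c, uint8_t imm) {
  uint32_t r = 0;
  for (int i = 0; i < 32; ++i) {
    unsigned idx = (((a >> i) & 1u) << 2) | (((b >> i) & 1u) << 1) | ((c >> i) & 1u);
    r |= (uint32_t)((imm >> idx) & 1u) << i;
  }
  return r;
}
// e.g. imm 0x96 sets exactly the table entries with an odd number of 1s,
// so ternlog32(a, b, c, 0x96) == (a ^ b ^ c); imm 0xF0 simply returns a.

[The expected vectors in the TEST_CONSTEXPR cases above can be cross-checked lane by lane against this model.]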
diff --git a/clang/test/CodeGen/builtins-nvptx.c b/clang/test/CodeGen/builtins-nvptx.c
index f994adb..e3be262 100644
--- a/clang/test/CodeGen/builtins-nvptx.c
+++ b/clang/test/CodeGen/builtins-nvptx.c
@@ -43,6 +43,12 @@
 // RUN: %clang_cc1 -ffp-contract=off -triple nvptx64-unknown-unknown -target-cpu sm_120a -target-feature +ptx86 -DPTX=86 \
 // RUN:   -disable-llvm-optzns -fcuda-is-device -emit-llvm -o - -x cuda %s \
 // RUN:   | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX86_SM120a %s
+// RUN: %clang_cc1 -ffp-contract=off -triple nvptx64-unknown-unknown -target-cpu sm_103a -target-feature +ptx87 -DPTX=87 \
+// RUN:   -disable-llvm-optzns -fcuda-is-device -emit-llvm -o - -x cuda %s \
+// RUN:   | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX87_SM103a %s
+// RUN: %clang_cc1 -ffp-contract=off -triple nvptx64-unknown-unknown -target-cpu sm_100a -target-feature +ptx87 -DPTX=87 \
+// RUN:   -disable-llvm-optzns -fcuda-is-device -emit-llvm -o - -x cuda %s \
+// RUN:   | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX87_SM100a %s
 // ### The last run to check with the highest SM and PTX version available
 // ### to make sure target builtins are still accepted.
 // RUN: %clang_cc1 -ffp-contract=off -triple nvptx64-unknown-unknown -target-cpu sm_120a -target-feature +ptx87 -DPTX=87 \
@@ -1203,6 +1209,123 @@ __device__ void nvvm_cvt_sm100a_sm101a_sm120a() {
   // CHECK: ret void
 }
+__device__ void nvvm_cvt_sm100a_sm103a() {
+#if (PTX >= 87) && (__CUDA_ARCH_FEAT_SM100_ALL || __CUDA_ARCH_FEAT_SM103_ALL)
+
+  typedef __fp16 f16x2 __attribute__((ext_vector_type(2)));
+  typedef __bf16 bf16x2 __attribute__((ext_vector_type(2)));
+  typedef char uint8x4 __attribute__((ext_vector_type(4)));
+
+// CHECK_PTX87_SM100a: %[[R1:.*]] = call <2 x half> @llvm.nvvm.ff2f16x2.rs(float 1.000000e+00, float 1.000000e+00, i32 0)
+// CHECK_PTX87_SM100a: store <2 x half> %[[R1]], ptr %r1
+// CHECK_PTX87_SM103a: %[[R1:.*]] = call <2 x half> @llvm.nvvm.ff2f16x2.rs(float 1.000000e+00, float 1.000000e+00, i32 0)
+// CHECK_PTX87_SM103a: store <2 x half> %[[R1]], ptr %r1
+  f16x2 r1 = __nvvm_ff2f16x2_rs(1.0f, 1.0f, 0);
+
+// CHECK_PTX87_SM100a: %[[R2:.*]] = call <2 x half> @llvm.nvvm.ff2f16x2.rs.relu(float 1.000000e+00, float 1.000000e+00, i32 0)
+// CHECK_PTX87_SM100a: store <2 x half> %[[R2]], ptr %r2
+// CHECK_PTX87_SM103a: %[[R2:.*]] = call <2 x half> @llvm.nvvm.ff2f16x2.rs.relu(float 1.000000e+00, float 1.000000e+00, i32 0)
+// CHECK_PTX87_SM103a: store <2 x half> %[[R2]], ptr %r2
+  f16x2 r2 = __nvvm_ff2f16x2_rs_relu(1.0f, 1.0f, 0);
+
+// CHECK_PTX87_SM100a: %[[R3:.*]] = call <2 x half> @llvm.nvvm.ff2f16x2.rs.satfinite(float 1.000000e+00, float 1.000000e+00, i32 0)
+// CHECK_PTX87_SM100a: store <2 x half> %[[R3]], ptr %r3
+// CHECK_PTX87_SM103a: %[[R3:.*]] = call <2 x half> @llvm.nvvm.ff2f16x2.rs.satfinite(float 1.000000e+00, float 1.000000e+00, i32 0)
+// CHECK_PTX87_SM103a: store <2 x half> %[[R3]], ptr %r3
+  f16x2 r3 = __nvvm_ff2f16x2_rs_satfinite(1.0f, 1.0f, 0);
+
+// CHECK_PTX87_SM100a: %[[R4:.*]] = call <2 x half> @llvm.nvvm.ff2f16x2.rs.relu.satfinite(float 1.000000e+00, float 1.000000e+00, i32 0)
+// CHECK_PTX87_SM100a: store <2 x half> %[[R4]], ptr %r4
+// CHECK_PTX87_SM103a: %[[R4:.*]] = call <2 x half> @llvm.nvvm.ff2f16x2.rs.relu.satfinite(float 1.000000e+00, float 1.000000e+00, i32 0)
+// CHECK_PTX87_SM103a: store <2 x half> %[[R4]], ptr %r4
+  f16x2 r4 = __nvvm_ff2f16x2_rs_relu_satfinite(1.0f, 1.0f, 0);
+
+// CHECK_PTX87_SM100a: %[[R5:.*]] = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs(float 1.000000e+00, float 1.000000e+00, i32 0)
+// CHECK_PTX87_SM100a: store <2 x bfloat> %[[R5]], ptr %r5
+// CHECK_PTX87_SM103a: %[[R5:.*]] = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs(float 1.000000e+00, float 1.000000e+00, i32 0)
+// CHECK_PTX87_SM103a: store <2 x bfloat> %[[R5]], ptr %r5
+  bf16x2 r5 = __nvvm_ff2bf16x2_rs(1.0f, 1.0f, 0);
+
+// CHECK_PTX87_SM100a: %[[R6:.*]] = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.relu(float 1.000000e+00, float 1.000000e+00, i32 0)
+// CHECK_PTX87_SM100a: store <2 x bfloat> %[[R6]], ptr %r6
+// CHECK_PTX87_SM103a: %[[R6:.*]] = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.relu(float 1.000000e+00, float 1.000000e+00, i32 0)
+// CHECK_PTX87_SM103a: store <2 x bfloat> %[[R6]], ptr %r6
+  bf16x2 r6 = __nvvm_ff2bf16x2_rs_relu(1.0f, 1.0f, 0);
+
+// CHECK_PTX87_SM100a: %[[R7:.*]] = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.satfinite(float 1.000000e+00, float 1.000000e+00, i32 0)
+// CHECK_PTX87_SM100a: store <2 x bfloat> %[[R7]], ptr %r7
+// CHECK_PTX87_SM103a: %[[R7:.*]] = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.satfinite(float 1.000000e+00, float 1.000000e+00, i32 0)
+// CHECK_PTX87_SM103a: store <2 x bfloat> %[[R7]], ptr %r7
+  bf16x2 r7 = __nvvm_ff2bf16x2_rs_satfinite(1.0f, 1.0f, 0);
+
+// CHECK_PTX87_SM100a: %[[R8:.*]] = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.relu.satfinite(float 1.000000e+00, float 1.000000e+00, i32 0)
+// CHECK_PTX87_SM100a: store <2 x bfloat> %[[R8]], ptr %r8
+// CHECK_PTX87_SM103a: %[[R8:.*]] = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.relu.satfinite(float 1.000000e+00, float 1.000000e+00, i32 0)
+// CHECK_PTX87_SM103a: store <2 x bfloat> %[[R8]], ptr %r8
+  bf16x2 r8 = __nvvm_ff2bf16x2_rs_relu_satfinite(1.0f, 1.0f, 0);
+
+// CHECK_PTX87_SM100a: %[[R9:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e4m3x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM100a: store <4 x i8> %[[R9]], ptr %r9
+// CHECK_PTX87_SM103a: %[[R9:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e4m3x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM103a: store <4 x i8> %[[R9]], ptr %r9
+  uint8x4 r9 = __nvvm_f32x4_to_e4m3x4_rs_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0);
+
+// CHECK_PTX87_SM100a: %[[R10:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e4m3x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM100a: store <4 x i8> %[[R10]], ptr %r10
+// CHECK_PTX87_SM103a: %[[R10:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e4m3x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM103a: store <4 x i8> %[[R10]], ptr %r10
+  uint8x4 r10 = __nvvm_f32x4_to_e4m3x4_rs_relu_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0);
+
+// CHECK_PTX87_SM100a: %[[R11:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e5m2x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM100a: store <4 x i8> %[[R11]], ptr %r11
+// CHECK_PTX87_SM103a: %[[R11:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e5m2x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM103a: store <4 x i8> %[[R11]], ptr %r11
+  uint8x4 r11 = __nvvm_f32x4_to_e5m2x4_rs_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0);
+
+// CHECK_PTX87_SM100a: %[[R12:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e5m2x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM100a: store <4 x i8> %[[R12]], ptr %r12
+// CHECK_PTX87_SM103a: %[[R12:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e5m2x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM103a: store <4 x i8> %[[R12]], ptr %r12
+  uint8x4 r12 = __nvvm_f32x4_to_e5m2x4_rs_relu_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0);
+
+// CHECK_PTX87_SM100a: %[[R13:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e2m3x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM100a: store <4 x i8> %[[R13]], ptr %r13
+// CHECK_PTX87_SM103a: %[[R13:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e2m3x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM103a: store <4 x i8> %[[R13]], ptr %r13
+  uint8x4 r13 = __nvvm_f32x4_to_e2m3x4_rs_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0);
+
+// CHECK_PTX87_SM100a: %[[R14:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e2m3x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM100a: store <4 x i8> %[[R14]], ptr %r14
+// CHECK_PTX87_SM103a: %[[R14:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e2m3x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM103a: store <4 x i8> %[[R14]], ptr %r14
+  uint8x4 r14 = __nvvm_f32x4_to_e2m3x4_rs_relu_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0);
+
+// CHECK_PTX87_SM100a: %[[R15:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e3m2x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM100a: store <4 x i8> %[[R15]], ptr %r15
+// CHECK_PTX87_SM103a: %[[R15:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e3m2x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM103a: store <4 x i8> %[[R15]], ptr %r15
+  uint8x4 r15 = __nvvm_f32x4_to_e3m2x4_rs_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0);
+
+// CHECK_PTX87_SM100a: %[[R16:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e3m2x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM100a: store <4 x i8> %[[R16]], ptr %r16
+// CHECK_PTX87_SM103a: %[[R16:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e3m2x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM103a: store <4 x i8> %[[R16]], ptr %r16
+  uint8x4 r16 = __nvvm_f32x4_to_e3m2x4_rs_relu_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0);
+
+// CHECK_PTX87_SM100a: %[[R17:.*]] = call i16 @llvm.nvvm.f32x4.to.e2m1x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM100a: store i16 %[[R17]], ptr %r17
+// CHECK_PTX87_SM103a: %[[R17:.*]] = call i16 @llvm.nvvm.f32x4.to.e2m1x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM103a: store i16 %[[R17]], ptr %r17
+  short r17 = __nvvm_f32x4_to_e2m1x4_rs_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0);
+
+// CHECK_PTX87_SM100a: %[[R18:.*]] = call i16 @llvm.nvvm.f32x4.to.e2m1x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM100a: store i16 %[[R18]], ptr %r18
+// CHECK_PTX87_SM103a: %[[R18:.*]] = call i16 @llvm.nvvm.f32x4.to.e2m1x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0)
+// CHECK_PTX87_SM103a: store i16 %[[R18]], ptr %r18
+  short r18 = __nvvm_f32x4_to_e2m1x4_rs_relu_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0);
+#endif
+}
+
 #define NAN32 0x7FBFFFFF
 #define NAN16 (__bf16)0x7FBF
 #define BF16 (__bf16)0.1f
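[Aside, not part of the patch: the new ".rs" conversion builtins exercised above all take a trailing u32 operand that supplies the entropy consumed by round-to-nearest-stochastic rounding. A hedged usage sketch in CUDA device code — the wrapper name is illustrative, and it assumes an sm_100a/sm_103a target with PTX 8.7 as in the RUN lines above:]

typedef __bf16 bf16x2 __attribute__((ext_vector_type(2)));

__device__ bf16x2 pack_bf16x2_stochastic(float a, float b, unsigned rbits) {
  // The third argument feeds the random bits used by the .rs rounding mode;
  // callers would typically draw it from a per-thread RNG state rather than
  // passing a constant as the lit test does.
  return __nvvm_ff2bf16x2_rs(a, b, rbits);
}

[Passing the same rbits twice makes the rounding reproducible, which is why the tests above can use a fixed 0.]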
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/AggregateSplatCast.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/AggregateSplatCast.hlsl
index 512fcd4..9524f02 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/AggregateSplatCast.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/AggregateSplatCast.hlsl
@@ -54,18 +54,16 @@ struct S {
 // struct splats
 // CHECK-LABEL: define void {{.*}}call3
-// CHECK: [[A:%.*]] = alloca <1 x i32>, align 4
+// CHECK: [[AA:%.*]] = alloca i32, align 4
 // CHECK: [[s:%.*]] = alloca %struct.S, align 1
-// CHECK-NEXT: store <1 x i32> splat (i32 1), ptr [[A]], align 4
-// CHECK-NEXT: [[L:%.*]] = load <1 x i32>, ptr [[A]], align 4
-// CHECK-NEXT: [[VL:%.*]] = extractelement <1 x i32> [[L]], i32 0
+// CHECK-NEXT: store i32 %A, ptr [[AA]], align 4
+// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[AA]], align 4
 // CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 0
 // CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 1
-// CHECK-NEXT: store i32 [[VL]], ptr [[G1]], align 4
-// CHECK-NEXT: [[C:%.*]] = sitofp i32 [[VL]] to float
+// CHECK-NEXT: store i32 [[L]], ptr [[G1]], align 4
+// CHECK-NEXT: [[C:%.*]] = sitofp i32 [[L]] to float
 // CHECK-NEXT: store float [[C]], ptr [[G2]], align 4
-export void call3() {
-  int1 A = {1};
+export void call3(int A) {
   S s = (S)A;
 }
@@ -85,3 +83,41 @@ export void call5() {
   int1 A = {1};
   S s = (S)A;
 }
+
+struct BFields {
+  double DF;
+  int E: 15;
+  int : 8;
+  float F;
+};
+
+struct Derived : BFields {
+  int G;
+};
+
+// derived struct with bitfields splat from scalar
+// CHECK-LABEL: call6
+// CHECK: [[AAddr:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[D:%.*]] = alloca %struct.Derived, align 1
+// CHECK-NEXT: store i32 %A, ptr [[AAddr]], align 4
+// CHECK-NEXT: [[B:%.*]] = load i32, ptr [[AAddr]], align 4
+// CHECK-NEXT: [[Gep:%.*]] = getelementptr inbounds %struct.Derived, ptr [[D]], i32 0, i32 0
+// CHECK-NEXT: [[E:%.*]] = getelementptr inbounds nuw %struct.BFields, ptr [[Gep]], i32 0, i32 1
+// CHECK-NEXT: [[Gep1:%.*]] = getelementptr inbounds %struct.Derived, ptr [[D]], i32 0, i32 0, i32 0
+// CHECK-NEXT: [[Gep2:%.*]] = getelementptr inbounds %struct.Derived, ptr [[D]], i32 0, i32 0, i32 2
+// CHECK-NEXT: [[Gep3:%.*]] = getelementptr inbounds %struct.Derived, ptr [[D]], i32 0, i32 1
+// CHECK-NEXT: [[C:%.*]] = sitofp i32 [[B]] to double
+// CHECK-NEXT: store double [[C]], ptr [[Gep1]], align 8
+// CHECK-NEXT: [[H:%.*]] = trunc i32 [[B]] to i24
+// CHECK-NEXT: [[BFL:%.*]] = load i24, ptr [[E]], align 1
+// CHECK-NEXT: [[BFV:%.*]] = and i24 [[H]], 32767
+// CHECK-NEXT: [[BFC:%.*]] = and i24 [[BFL]], -32768
+// CHECK-NEXT: [[BFS:%.*]] = or i24 [[BFC]], [[BFV]]
+// CHECK-NEXT: store i24 [[BFS]], ptr [[E]], align 1
+// CHECK-NEXT: [[C4:%.*]] = sitofp i32 [[B]] to float
+// CHECK-NEXT: store float [[C4]], ptr [[Gep2]], align 4
+// CHECK-NEXT: store i32 [[B]], ptr [[Gep3]], align 4
+// CHECK-NEXT: ret void
+export void call6(int A) {
+  Derived D = (Derived)A;
+}
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/ArrayElementwiseCast.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/ArrayElementwiseCast.hlsl
index ac02ddf..5f2182e 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/ArrayElementwiseCast.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/ArrayElementwiseCast.hlsl
@@ -10,7 +10,8 @@
 // CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 0
 // CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds [2 x i32], ptr [[Tmp]], i32 0, i32 1
 // CHECK-NEXT: [[L:%.*]] = load i32, ptr [[G1]], align 4
-// CHECK-NEXT: store i32 [[L]], ptr [[B]], align 4
+// CHECK-NEXT: [[C:%.*]] = sitofp i32 [[L]] to float
+// CHECK-NEXT: store float [[C]], ptr [[B]], align 4
 export void call0() {
   int A[2] = {0,1};
   float B = (float)A;
@@ -141,3 +142,46 @@ export void call7() {
   int A[1] = {1};
   A = (int[1])s;
 }
+
+struct BFields {
+  double D;
+  int E: 15;
+  int : 8;
+  float F;
+};
+
+struct Derived : BFields {
+  int G;
+};
+
+// flatten from a derived struct with bitfields
+// CHECK-LABEL: call8
+// CHECK: [[A:%.*]] = alloca [4 x i32], align 4
+// CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.Derived, align 1
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[Tmp]], ptr align 1 %D, i32 19, i1 false)
+// CHECK-NEXT: [[Gep:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 0, i32 0
+// CHECK-NEXT: [[Gep1:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 0, i32 1
+// CHECK-NEXT: [[Gep2:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 0, i32 2
+// CHECK-NEXT: [[Gep3:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 0, i32 3
+// CHECK-NEXT: [[Gep4:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 0
+// CHECK-NEXT: [[E:%.*]] = getelementptr inbounds nuw %struct.BFields, ptr [[Gep4]], i32 0, i32 1
+// CHECK-NEXT: [[Gep5:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 0, i32 0
+// CHECK-NEXT: [[Gep6:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 0, i32 2
+// CHECK-NEXT: [[Gep7:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 1
+// CHECK-NEXT: [[Z:%.*]] = load double, ptr [[Gep5]], align 8
+// CHECK-NEXT: [[C:%.*]] = fptosi double [[Z]] to i32
+// CHECK-NEXT: store i32 [[C]], ptr [[Gep]], align 4
+// CHECK-NEXT: [[BFL:%.*]] = load i24, ptr [[E]], align 1
+// CHECK-NEXT: [[BFShl:%.*]] = shl i24 [[BFL]], 9
+// CHECK-NEXT: [[BFAshr:%.*]] = ashr i24 [[BFShl]], 9
+// CHECK-NEXT: [[BFC:%.*]] = sext i24 [[BFAshr]] to i32
+// CHECK-NEXT: store i32 [[BFC]], ptr [[Gep1]], align 4
+// CHECK-NEXT: [[Y:%.*]] = load float, ptr [[Gep6]], align 4
+// CHECK-NEXT: [[C8:%.*]] = fptosi float [[Y]] to i32
+// CHECK-NEXT: store i32 [[C8]], ptr [[Gep2]], align 4
+// CHECK-NEXT: [[X:%.*]] = load i32, ptr [[Gep7]], align 4
+// CHECK-NEXT: store i32 [[X]], ptr [[Gep3]], align 4
+// CHECK-NEXT: ret void
+export void call8(Derived D) {
+  int A[4] = (int[4])D;
+}
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/StructElementwiseCast.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/StructElementwiseCast.hlsl
index 81b9f5b..4e29994 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/StructElementwiseCast.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/StructElementwiseCast.hlsl
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.3-library -x hlsl -emit-llvm -disable-llvm-passes -o - %s | FileCheck %s
+// RUN: %clang_cc1 -finclude-default-header -fnative-half-type -triple dxil-pc-shadermodel6.3-library -x hlsl -emit-llvm -disable-llvm-passes -o - %s | FileCheck %s
 
 struct S {
   int X;
@@ -127,14 +127,219 @@ struct T {
 // CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 0
 // CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds %struct.S, ptr [[s]], i32 0, i32 1
 // CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds %struct.T, ptr [[Tmp]], i32 0, i32 0
-// CHECK-NEXT: %gep3 = getelementptr inbounds %struct.T, ptr %agg-temp, i32 0, i32 1
-// CHECK-NEXT: %gep4 = getelementptr inbounds %struct.T, ptr %agg-temp, i32 0, i32 2
-// CHECK-NEXT: %load = load i32, ptr %gep2, align 4
-// CHECK-NEXT: store i32 %load, ptr %gep, align 4
-// CHECK-NEXT: %load5 = load i32, ptr %gep3, align 4
-// CHECK-NEXT: %conv = sitofp i32 %load5 to float
-// CHECK-NEXT: store float %conv, ptr %gep1, align 4
+// CHECK-NEXT: [[G4:%.*]] = getelementptr inbounds %struct.T, ptr %agg-temp, i32 0, i32 1
+// CHECK-NEXT: [[G5:%.*]] = getelementptr inbounds %struct.T, ptr %agg-temp, i32 0, i32 2
+// CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[G3]], align 4
+// CHECK-NEXT: store i32 [[L1]], ptr [[G1]], align 4
+// CHECK-NEXT: [[L2:%.*]] = load i32, ptr [[G4]], align 4
+// CHECK-NEXT: [[C:%.*]] = sitofp i32 [[L2]] to float
+// CHECK-NEXT: store float [[C]], ptr [[G2]], align 4
 export void call8() {
   T t = {1,2,3};
   S s = (S)t;
 }
+
+struct BFields {
+  double D;
+  int E: 15;
+  int : 8;
+  float F;
+};
+
+struct Derived : BFields {
+  int G;
+};
+
+// Derived struct truncate to scalar
+// CHECK-LABEL: call9
+// CHECK: [[D2:%.*]] = alloca double, align 8
+// CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.Derived, align 1
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[Tmp]], ptr align 1 %D, i32 19, i1 false)
+// CHECK-NEXT: [[Gep:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 0
+// CHECK-NEXT: [[E:%.*]] = getelementptr inbounds nuw %struct.BFields, ptr [[Gep]], i32 0, i32 1
+// CHECK-NEXT: [[Gep1:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 0, i32 0
+// CHECK-NEXT: [[Gep2:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 0, i32 2
+// CHECK-NEXT: [[Gep3:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 1
+// CHECK-NEXT: [[A:%.*]] = load double, ptr [[Gep1]], align 8
+// CHECK-NEXT: store double [[A]], ptr [[D2]], align 8
+// CHECK-NEXT: ret void
+export void call9(Derived D) {
+  double D2 = (double)D;
+}
+
+// Derived struct from vector
+// CHECK-LABEL: call10
+// CHECK: [[IAddr:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT: [[D:%.*]] = alloca %struct.Derived, align 1
+// CHECK-NEXT: store <4 x i32> %I, ptr [[IAddr]], align 16
+// CHECK-NEXT: [[A:%.*]] = load <4 x i32>, ptr [[IAddr]], align 16
+// CHECK-NEXT: [[Gep:%.*]] = getelementptr inbounds %struct.Derived, ptr [[D]], i32 0, i32 0
+// CHECK-NEXT: [[E:%.*]] = getelementptr inbounds nuw %struct.BFields, ptr [[Gep]], i32 0, i32 1
+// CHECK-NEXT: [[Gep1:%.*]] = getelementptr inbounds %struct.Derived, ptr [[D]], i32 0, i32 0, i32 0
+// CHECK-NEXT: [[Gep2:%.*]] = getelementptr inbounds %struct.Derived, ptr [[D]], i32 0, i32 0, i32 2
+// CHECK-NEXT: [[Gep3:%.*]] = getelementptr inbounds %struct.Derived, ptr [[D]], i32 0, i32 1
+// CHECK-NEXT: [[VL:%.*]] = extractelement <4 x i32> [[A]], i64 0
+// CHECK-NEXT: [[C:%.*]] = sitofp i32 [[VL]] to double
+// CHECK-NEXT: store double [[C]], ptr [[Gep1]], align 8
+// CHECK-NEXT: [[VL4:%.*]] = extractelement <4 x i32> [[A]], i64 1
+// CHECK-NEXT: [[B:%.*]] = trunc i32 [[VL4]] to i24
+// CHECK-NEXT: [[BFL:%.*]] = load i24, ptr [[E]], align 1
+// CHECK-NEXT: [[BFV:%.*]] = and i24 [[B]], 32767
+// CHECK-NEXT: [[BFC:%.*]] = and i24 [[BFL]], -32768
+// CHECK-NEXT: [[BFSet:%.*]] = or i24 [[BFC]], [[BFV]]
+// CHECK-NEXT: store i24 [[BFSet]], ptr [[E]], align 1
+// CHECK-NEXT: [[VL5:%.*]] = extractelement <4 x i32> [[A]], i64 2
+// CHECK-NEXT: [[C6:%.*]] = sitofp i32 [[VL5]] to float
+// CHECK-NEXT: store float [[C6]], ptr [[Gep2]], align 4
+// CHECK-NEXT: [[VL7:%.*]] = extractelement <4 x i32> [[A]], i64 3
+// CHECK-NEXT: store i32 [[VL7]], ptr [[Gep3]], align 4
+// CHECK-NEXT: ret void
+export void call10(int4 I) {
+  Derived D = (Derived)I;
+}
+
+// truncate derived struct
+// CHECK-LABEL: call11
+// CHECK: [[B:%.*]] = alloca %struct.BFields, align 1
+// CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.Derived, align 1
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[Tmp]], ptr align 1 [[D]], i32 19, i1 false)
+// CHECK-NEXT: [[Gep:%.*]] = getelementptr inbounds %struct.BFields, ptr [[B]], i32 0
+// CHECK-NEXT: [[E:%.*]] = getelementptr inbounds nuw %struct.BFields, ptr [[Gep]], i32 0, i32 1
+// CHECK-NEXT: [[Gep1:%.*]] = getelementptr inbounds %struct.BFields, ptr [[B]], i32 0, i32 0
+// CHECK-NEXT: [[Gep2:%.*]] = getelementptr inbounds %struct.BFields, ptr [[B]], i32 0, i32 2
+// CHECK-NEXT: [[Gep3:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 0
+// CHECK-NEXT: [[E4:%.*]] = getelementptr inbounds nuw %struct.BFields, ptr [[Gep3]], i32 0, i32 1
+// CHECK-NEXT: [[Gep5:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 0, i32 0
+// CHECK-NEXT: [[Gep6:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 0, i32 2
+// CHECK-NEXT: [[Gep7:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 1
+// CHECK-NEXT: [[A:%.*]] = load double, ptr [[Gep5]], align 8
+// CHECK-NEXT: store double [[A]], ptr [[Gep1]], align 8
+// CHECK-NEXT: [[BFL:%.*]] = load i24, ptr [[E4]], align 1
+// CHECK-NEXT: [[Shl:%.*]] = shl i24 [[BFL]], 9
+// CHECK-NEXT: [[Ashr:%.*]] = ashr i24 [[Shl]], 9
+// CHECK-NEXT: [[BFC:%.*]] = sext i24 [[Ashr]] to i32
+// CHECK-NEXT: [[B:%.*]] = trunc i32 [[BFC]] to i24
+// CHECK-NEXT: [[BFL8:%.*]] = load i24, ptr [[E]], align 1
+// CHECK-NEXT: [[BFV:%.*]] = and i24 [[B]], 32767
+// CHECK-NEXT: [[BFC:%.*]] = and i24 [[BFL8]], -32768
+// CHECK-NEXT: [[BFSet:%.*]] = or i24 [[BFC]], [[BFV]]
+// CHECK-NEXT: store i24 [[BFSet]], ptr [[E]], align 1
+// CHECK-NEXT: [[C:%.*]] = load float, ptr [[Gep6]], align 4
+// CHECK-NEXT: store float [[C]], ptr [[Gep2]], align 4
+// CHECK-NEXT: ret void
+export void call11(Derived D) {
+  BFields B = (BFields)D;
+}
+
+struct Empty {
+};
+
+// cast to an empty struct
+// CHECK-LABEL: call12
+// CHECK: [[I:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT: [[E:%.*]] = alloca %struct.Empty, align 1
+// CHECK-NEXT: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr [[I]], align 16
+// CHECK-NEXT: [[A:%.*]] = load <4 x i32>, ptr [[I]], align 16
+// CHECK-NEXT: ret void
+export void call12() {
+  int4 I = {1,2,3,4};
+  Empty E = (Empty)I;
+}
+
+struct MoreBFields {
+  int A;
+  uint64_t B: 60;
+  float C;
+  uint16_t D: 10;
+  uint16_t E: 6;
+  int : 32;
+  double F;
+  int : 8;
+  uint G;
+};
+
+// more complicated bitfield case
+// CHECK-LABEL: call13
+// CHECK: [[AA:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[MBF:%.*]] = alloca %struct.MoreBFields, align 1
+// CHECK-NEXT: store i32 %A, ptr [[AA]], align 4
+// CHECK-NEXT: [[Z:%.*]] = load i32, ptr [[AA]], align 4
+// get the gep for the struct.
+// CHECK-NEXT: [[Gep:%.*]] = getelementptr inbounds %struct.MoreBFields, ptr [[MBF]], i32 0
+// CHECK-NEXT: [[FieldB:%.*]] = getelementptr inbounds nuw %struct.MoreBFields, ptr [[Gep]], i32 0, i32 1
+// D and E share the same field index
+// CHECK-NEXT: [[FieldD:%.*]] = getelementptr inbounds nuw %struct.MoreBFields, ptr [[Gep]], i32 0, i32 3
+// CHECK-NEXT: [[FieldE:%.*]] = getelementptr inbounds nuw %struct.MoreBFields, ptr [[Gep]], i32 0, i32 3
+// CHECK-NEXT: [[FieldA:%.*]] = getelementptr inbounds %struct.MoreBFields, ptr [[MBF]], i32 0, i32 0
+// CHECK-NEXT: [[FieldC:%.*]] = getelementptr inbounds %struct.MoreBFields, ptr [[MBF]], i32 0, i32 2
+// CHECK-NEXT: [[FieldF:%.*]] = getelementptr inbounds %struct.MoreBFields, ptr [[MBF]], i32 0, i32 5
+// CHECK-NEXT: [[FieldG:%.*]] = getelementptr inbounds %struct.MoreBFields, ptr [[MBF]], i32 0, i32 7
+// store int A into field A
+// CHECK-NEXT: store i32 [[Z]], ptr [[FieldA]], align 4
+// store int A in bitfield B, doing the necessary conversions
+// CHECK-NEXT: [[Conv:%.*]] = sext i32 [[Z]] to i64
+// CHECK-NEXT: [[BFL:%.*]] = load i64, ptr [[FieldB]], align 1
+// CHECK-NEXT: [[BFV:%.*]] = and i64 [[Conv]], 1152921504606846975
+// CHECK-NEXT: [[BFC:%.*]] = and i64 [[BFL]], -1152921504606846976
+// CHECK-NEXT: [[BFS:%.*]] = or i64 [[BFC]], [[BFV]]
+// CHECK-NEXT: store i64 [[BFS]], ptr [[FieldB]], align 1
+// store int A into field C
+// CHECK-NEXT: [[Conv5:%.*]] = sitofp i32 [[Z]] to float
+// CHECK-NEXT: store float [[Conv5]], ptr [[FieldC]], align 4
+// store int A into bitfield D
+// CHECK-NEXT: [[Conv6:%.*]] = trunc i32 [[Z]] to i16
+// CHECK-NEXT: [[FDL:%.*]] = load i16, ptr [[FieldD]], align 1
+// CHECK-NEXT: [[FDV:%.*]] = and i16 [[Conv6]], 1023
+// CHECK-NEXT: [[FDC:%.*]] = and i16 [[FDL]], -1024
+// CHECK-NEXT: [[FDS:%.*]] = or i16 [[FDC]], [[FDV]]
+// CHECK-NEXT: store i16 [[FDS]], ptr [[FieldD]], align 1
+// store int A into bitfield E
+// CHECK-NEXT: [[Conv11:%.*]] = trunc i32 [[Z]] to i16
+// CHECK-NEXT: [[FEL:%.*]] = load i16, ptr [[FieldE]], align 1
+// CHECK-NEXT: [[FEV:%.*]] = and i16 [[Conv11]], 63
+// CHECK-NEXT: [[FESHL:%.*]] = shl i16 [[FEV]], 10
+// CHECK-NEXT: [[FEC:%.*]] = and i16 [[FEL]], 1023
+// CHECK-NEXT: [[FES:%.*]] = or i16 [[FEC]], [[FESHL]]
+// CHECK-NEXT: store i16 [[FES]], ptr [[FieldE]], align 1
+// store int A into field F
+// CHECK-NEXT: [[Conv16:%.*]] = sitofp i32 [[Z]] to double
+// CHECK-NEXT: store double [[Conv16]], ptr [[FieldF]], align 8
+// store int A into field G
+// CHECK-NEXT: store i32 [[Z]], ptr [[FieldG]], align 4
+// CHECK-NEXT: ret void
+export void call13(int A) {
+  MoreBFields MBF = (MoreBFields)A;
+}
+
+struct Inner {
+  int Z;
+  int Y : 25;
+};
+
+struct Outer {
+  int A;
+  Inner I;
+};
+
+// show usage of "extra" gep for struct containing bitfield
+// CHECK-LABEL: call14
+// CHECK: [[AA:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[O:%.*]] = alloca %struct.Outer, align 1
+// CHECK-NEXT: store i32 %A, ptr [[AA]], align 4
+// CHECK-NEXT: [[Z:%.*]] = load i32, ptr [[AA]], align 4
+// CHECK-NEXT: [[FieldA:%.*]] = getelementptr inbounds %struct.Outer, ptr [[O]], i32 0, i32 0
+// showing real usage of the "extra gep": we need the Inner struct to generate access to its bitfield.
+// CHECK-NEXT: [[FieldI:%.*]] = getelementptr inbounds %struct.Outer, ptr [[O]], i32 0, i32 1
+// CHECK-NEXT: [[FieldY:%.*]] = getelementptr inbounds nuw %struct.Inner, ptr [[FieldI]], i32 0, i32 1
+// CHECK-NEXT: [[FieldZ:%.*]] = getelementptr inbounds %struct.Outer, ptr [[O]], i32 0, i32 1, i32 0
+// CHECK-NEXT: store i32 [[Z]], ptr [[FieldA]], align 4
+// CHECK-NEXT: store i32 [[Z]], ptr [[FieldZ]], align 4
+// CHECK-NEXT: [[BFL:%.*]] = load i32, ptr [[FieldY]], align 1
+// CHECK-NEXT: [[BFV:%.*]] = and i32 [[Z]], 33554431
+// CHECK-NEXT: [[BFC:%.*]] = and i32 [[BFL]], -33554432
+// CHECK-NEXT: [[BFS:%.*]] = or i32 [[BFC]], [[BFV]]
+// CHECK-NEXT: store i32 [[BFS]], ptr [[FieldY]], align 1
+// CHECK-NEXT: ret void
+export void call14(int A) {
  Outer O = (Outer)A;
+}
+// CHECK-NEXT: [[FieldI:%.*]] = getelementptr inbounds %struct.Outer, ptr [[O]], i32 0, i32 1
+// CHECK-NEXT: [[FieldY:%.*]] = getelementptr inbounds nuw %struct.Inner, ptr [[FieldI]], i32 0, i32 1
+// CHECK-NEXT: [[FieldZ:%.*]] = getelementptr inbounds %struct.Outer, ptr [[O]], i32 0, i32 1, i32 0
+// CHECK-NEXT: store i32 [[Z]], ptr [[FieldA]], align 4
+// CHECK-NEXT: store i32 [[Z]], ptr [[FieldZ]], align 4
+// CHECK-NEXT: [[BFL:%.*]] = load i32, ptr [[FieldY]], align 1
+// CHECK-NEXT: [[BFV:%.*]] = and i32 [[Z]], 33554431
+// CHECK-NEXT: [[BFC:%.*]] = and i32 [[BFL]], -33554432
+// CHECK-NEXT: [[BFS:%.*]] = or i32 [[BFC]], [[BFV]]
+// CHECK-NEXT: store i32 [[BFS]], ptr [[FieldY]], align 1
+// CHECK-NEXT: ret void
+export void call14(int A) {
+  Outer O = (Outer)A;
+}
diff --git a/clang/test/CodeGenHLSL/BasicFeatures/VectorElementwiseCast.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/VectorElementwiseCast.hlsl
index 253b38a7c..26aa41a 100644
--- a/clang/test/CodeGenHLSL/BasicFeatures/VectorElementwiseCast.hlsl
+++ b/clang/test/CodeGenHLSL/BasicFeatures/VectorElementwiseCast.hlsl
@@ -79,3 +79,45 @@ export void call5() {
   S s = {1, 2.0};
   int A = (int)s;
 }
+
+struct BFields {
+  double D;
+  int E: 15;
+  int : 8;
+  float F;
+};
+
+struct Derived : BFields {
+  int G;
+};
+
+// vector flat cast from a derived struct with a bitfield
+// CHECK-LABEL: call6
+// CHECK: [[A:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT: [[Tmp:%.*]] = alloca %struct.Derived, align 1
+// CHECK-NEXT: [[FlatTmp:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[Tmp]], ptr align 1 %D, i32 19, i1 false)
+// CHECK-NEXT: [[Gep:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 0
+// CHECK-NEXT: [[E:%.*]] = getelementptr inbounds nuw %struct.BFields, ptr [[Gep]], i32 0, i32 1
+// CHECK-NEXT: [[Gep1:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 0, i32 0
+// CHECK-NEXT: [[Gep2:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 0, i32 2
+// CHECK-NEXT: [[Gep3:%.*]] = getelementptr inbounds %struct.Derived, ptr [[Tmp]], i32 0, i32 1
+// CHECK-NEXT: [[Z:%.*]] = load <4 x i32>, ptr [[FlatTmp]], align 16
+// CHECK-NEXT: [[Y:%.*]] = load double, ptr [[Gep1]], align 8
+// CHECK-NEXT: [[C:%.*]] = fptosi double [[Y]] to i32
+// CHECK-NEXT: [[X:%.*]] = insertelement <4 x i32> [[Z]], i32 [[C]], i64 0
+// CHECK-NEXT: [[BFL:%.*]] = load i24, ptr [[E]], align 1
+// CHECK-NEXT: [[BFShl:%.*]] = shl i24 [[BFL]], 9
+// CHECK-NEXT: [[BFAshr:%.*]] = ashr i24 [[BFShl]], 9
+// CHECK-NEXT: [[BFC:%.*]] = sext i24 [[BFAshr]] to i32
+// CHECK-NEXT: [[W:%.*]] = insertelement <4 x i32> [[X]], i32 [[BFC]], i64 1
+// CHECK-NEXT: [[V:%.*]] = load float, ptr [[Gep2]], align 4
+// CHECK-NEXT: [[C4:%.*]] = fptosi float [[V]] to i32
+// CHECK-NEXT: [[U:%.*]] = insertelement <4 x i32> [[W]], i32 [[C4]], i64 2
+// CHECK-NEXT: [[T:%.*]] = load i32, ptr [[Gep3]], align 4
+// CHECK-NEXT: [[S:%.*]] = insertelement <4 x i32> [[U]], i32 [[T]], i64 3
+// CHECK-NEXT: store <4 x i32> [[S]], ptr [[A]], align 16
+// CHECK-NEXT: ret void
+export void call6(Derived D) {
+  int4 A = (int4)D;
+}
diff --git a/clang/test/CodeGenOpenCL/amdgpu-features.cl b/clang/test/CodeGenOpenCL/amdgpu-features.cl
index af1ef64..7cc83c0 100644
--- a/clang/test/CodeGenOpenCL/amdgpu-features.cl
+++ b/clang/test/CodeGenOpenCL/amdgpu-features.cl
@@ -109,8 +109,8 @@
 // GFX1153: "target-features"="+16-bit-insts,+atomic-fadd-rtn-insts,+atomic-fmin-fmax-global-f32,+ci-insts,+dl-insts,+dot10-insts,+dot12-insts,+dot5-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
 // GFX1200: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot10-insts,+dot11-insts,+dot12-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+fp8-conversion-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
 // GFX1201: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot10-insts,+dot11-insts,+dot12-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+fp8-conversion-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
-// GFX1250: "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32"
-// GFX1251: "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32"
+// GFX1250: "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+clusters,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32"
+// GFX1251: "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+clusters,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32"
 // GFX1103-W64: "target-features"="+16-bit-insts,+atomic-fadd-rtn-insts,+atomic-fmin-fmax-global-f32,+ci-insts,+dl-insts,+dot10-insts,+dot12-insts,+dot5-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize64"
diff --git a/clang/test/Headers/arm-acle-header.c b/clang/test/Headers/arm-acle-header.c
index fea8472..58fcc66 100644
--- a/clang/test/Headers/arm-acle-header.c
+++ b/clang/test/Headers/arm-acle-header.c
@@ -10,6 +10,8 @@
 // RUN: %clang_cc1 -x c++ -triple arm64ec-windows -target-cpu cortex-a53 -fsyntax-only -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=19.11 %s
 // expected-no-diagnostics
+#include "system_reserved_names.h"
+
 #include <arm_acle.h>
 #ifdef _MSC_VER
 #include <intrin.h>
diff --git a/clang/test/Headers/arm-cde-header.c b/clang/test/Headers/arm-cde-header.c
index 1f60368..526202a 100644
--- a/clang/test/Headers/arm-cde-header.c
+++ b/clang/test/Headers/arm-cde-header.c
@@ -9,5 +9,7 @@
 // Check that the headers don't conflict with each other
+#include "system_reserved_names.h"
+
 #include <arm_cde.h>
 #include <arm_mve.h>
diff --git a/clang/test/Headers/arm-cmse-header.c b/clang/test/Headers/arm-cmse-header.c
index 862572d..c21c1ff 100644
--- a/clang/test/Headers/arm-cmse-header.c
+++ b/clang/test/Headers/arm-cmse-header.c
@@ -2,6 +2,8 @@
 // RUN: %clang_cc1 -triple thumbv8m.base-eabi -fsyntax-only -ffreestanding -x c++ %s -verify -mcmse
 // expected-no-diagnostics
+#include "system_reserved_names.h"
+
 #include <arm_cmse.h>
 typedef void (*callback_t)(void);
diff --git a/clang/test/Headers/arm-fp16-header.c b/clang/test/Headers/arm-fp16-header.c
index b1a87fa..e472654 100644
--- a/clang/test/Headers/arm-fp16-header.c
+++ b/clang/test/Headers/arm-fp16-header.c
@@ -18,4 +18,6 @@
 // REQUIRES: aarch64-registered-target || arm-registered-target
+#include "system_reserved_names.h"
+
 #include <arm_fp16.h>
diff --git a/clang/test/Headers/arm-neon-header.c b/clang/test/Headers/arm-neon-header.c
index 89bd5aa..43b1b35 100644
--- a/clang/test/Headers/arm-neon-header.c
+++ b/clang/test/Headers/arm-neon-header.c
@@ -26,4 +26,6 @@
 // REQUIRES: aarch64-registered-target || arm-registered-target
+#include "system_reserved_names.h"
+
 #include <arm_neon.h>
diff --git a/clang/test/Headers/system_reserved_names.h b/clang/test/Headers/system_reserved_names.h
new file mode 100644
index 0000000..1a53f4f
--- /dev/null
+++ b/clang/test/Headers/system_reserved_names.h
@@ -0,0 +1,165 @@
+// Test that headers are not tripped up by the surrounding code defining various
+// alphabetic macros. Also ensure that we don't swallow the definition of
+// user-provided macros (in other words, ensure that we push/pop correctly everywhere).
+//
+// The contents of this header are a lightly trimmed version of
+// libcxx/test/libcxx/system_reserved_names.gen.py; additions to that testcase
+// can be synced into this header as well.
+
+#define SYSTEM_RESERVED_NAME This name should not be used in Clang headers
+
+// libc++ does not use single-letter names as a matter of principle.
+// But Windows' own <wchar.h>, <math.h>, and <exception> use many of these
+// (at least C,E,F,I,M,N,P,S,X,Y,Z) as uglified function parameter names,
+// so don't define these on Windows.
+//
+#ifndef _WIN32
+#define _A SYSTEM_RESERVED_NAME
+#define _B SYSTEM_RESERVED_NAME
+#define _C SYSTEM_RESERVED_NAME
+#define _D SYSTEM_RESERVED_NAME
+#define _E SYSTEM_RESERVED_NAME
+#define _F SYSTEM_RESERVED_NAME
+#define _G SYSTEM_RESERVED_NAME
+#define _H SYSTEM_RESERVED_NAME
+#define _I SYSTEM_RESERVED_NAME
+#define _J SYSTEM_RESERVED_NAME
+#define _K SYSTEM_RESERVED_NAME
+#define _L SYSTEM_RESERVED_NAME
+#define _M SYSTEM_RESERVED_NAME
+#define _N SYSTEM_RESERVED_NAME
+#define _O SYSTEM_RESERVED_NAME
+#define _P SYSTEM_RESERVED_NAME
+#define _Q SYSTEM_RESERVED_NAME
+#define _R SYSTEM_RESERVED_NAME
+#define _S SYSTEM_RESERVED_NAME
+#define _T SYSTEM_RESERVED_NAME
+#define _U SYSTEM_RESERVED_NAME
+#define _V SYSTEM_RESERVED_NAME
+#define _W SYSTEM_RESERVED_NAME
+#define _X SYSTEM_RESERVED_NAME
+#define _Y SYSTEM_RESERVED_NAME
+#define _Z SYSTEM_RESERVED_NAME
+#endif
+
+// FreeBSD's <sys/types.h> uses _M
+//
+#ifdef __FreeBSD__
+# undef _M
+#endif
+
+// Test that libc++ doesn't use names that collide with FreeBSD system macros.
+// newlib and picolibc also define these macros
+#if !defined(__FreeBSD__) && !defined(_NEWLIB_VERSION)
+# define __null_sentinel SYSTEM_RESERVED_NAME
+# define __generic SYSTEM_RESERVED_NAME
+#endif
+
+// tchar.h defines these macros on Windows
+#ifndef _WIN32
+# define _UI SYSTEM_RESERVED_NAME
+# define _PUC SYSTEM_RESERVED_NAME
+# define _CPUC SYSTEM_RESERVED_NAME
+# define _PC SYSTEM_RESERVED_NAME
+# define _CRPC SYSTEM_RESERVED_NAME
+# define _CPC SYSTEM_RESERVED_NAME
+#endif
+
+// yvals.h on MINGW defines this macro
+#ifndef _WIN32
+# define _C2 SYSTEM_RESERVED_NAME
+#endif
+
+// Test that libc++ doesn't use names that collide with Win32 API macros.
+// Obviously we can only define these on non-Windows platforms.
+#ifndef _WIN32
+# define __allocator SYSTEM_RESERVED_NAME
+# define __bound SYSTEM_RESERVED_NAME
+# define __deallocate SYSTEM_RESERVED_NAME
+# define __deref SYSTEM_RESERVED_NAME
+# define __format_string SYSTEM_RESERVED_NAME
+# define __full SYSTEM_RESERVED_NAME
+# define __in SYSTEM_RESERVED_NAME
+# define __inout SYSTEM_RESERVED_NAME
+# define __nz SYSTEM_RESERVED_NAME
+# define __out SYSTEM_RESERVED_NAME
+# define __part SYSTEM_RESERVED_NAME
+# define __post SYSTEM_RESERVED_NAME
+# define __pre SYSTEM_RESERVED_NAME
+#endif
+
+// Newlib & picolibc use __input as a parameter name of a64l & l64a
+#ifndef _NEWLIB_VERSION
+# define __input SYSTEM_RESERVED_NAME
+#endif
+#define __output SYSTEM_RESERVED_NAME
+
+#define __acquire SYSTEM_RESERVED_NAME
+#define __release SYSTEM_RESERVED_NAME
+
+// Android and FreeBSD use this for __attribute__((__unused__))
+#if !defined(__FreeBSD__) && !defined(__ANDROID__)
+#define __unused SYSTEM_RESERVED_NAME
+#endif
+
+// These names are not reserved, so the user can macro-define them.
+// These are intended to find improperly _Uglified template parameters.
+#define A SYSTEM_RESERVED_NAME
+#define Arg SYSTEM_RESERVED_NAME
+#define Args SYSTEM_RESERVED_NAME
+#define As SYSTEM_RESERVED_NAME
+#define B SYSTEM_RESERVED_NAME
+#define Bs SYSTEM_RESERVED_NAME
+#define C SYSTEM_RESERVED_NAME
+#define Cp SYSTEM_RESERVED_NAME
+#define Cs SYSTEM_RESERVED_NAME
+// Windows setjmp.h contains a struct member named 'D' on ARM/AArch64.
+#ifndef _WIN32
+# define D SYSTEM_RESERVED_NAME
+#endif
+#define Dp SYSTEM_RESERVED_NAME
+#define Ds SYSTEM_RESERVED_NAME
+#define E SYSTEM_RESERVED_NAME
+#define Ep SYSTEM_RESERVED_NAME
+#define Es SYSTEM_RESERVED_NAME
+#define N SYSTEM_RESERVED_NAME
+#define Np SYSTEM_RESERVED_NAME
+#define Ns SYSTEM_RESERVED_NAME
+#define R SYSTEM_RESERVED_NAME
+#define Rp SYSTEM_RESERVED_NAME
+#define Rs SYSTEM_RESERVED_NAME
+#define T SYSTEM_RESERVED_NAME
+#define Tp SYSTEM_RESERVED_NAME
+#define Ts SYSTEM_RESERVED_NAME
+#define Type SYSTEM_RESERVED_NAME
+#define Types SYSTEM_RESERVED_NAME
+#define U SYSTEM_RESERVED_NAME
+#define Up SYSTEM_RESERVED_NAME
+#define Us SYSTEM_RESERVED_NAME
+#define V SYSTEM_RESERVED_NAME
+#define Vp SYSTEM_RESERVED_NAME
+#define Vs SYSTEM_RESERVED_NAME
+#define X SYSTEM_RESERVED_NAME
+#define Xp SYSTEM_RESERVED_NAME
+#define Xs SYSTEM_RESERVED_NAME
+
+// The classic Windows min/max macros
+#define min SYSTEM_RESERVED_NAME
+#define max SYSTEM_RESERVED_NAME
+
+// Test to make sure curses has no conflicting macros with the standard library
+#define move SYSTEM_RESERVED_NAME
+#define erase SYSTEM_RESERVED_NAME
+#define refresh SYSTEM_RESERVED_NAME
+
+// Dinkumware libc ctype.h uses these definitions
+#define _XA SYSTEM_RESERVED_NAME
+#define _XS SYSTEM_RESERVED_NAME
+#define _BB SYSTEM_RESERVED_NAME
+#define _CN SYSTEM_RESERVED_NAME
+#define _DI SYSTEM_RESERVED_NAME
+#define _LO SYSTEM_RESERVED_NAME
+#define _PU SYSTEM_RESERVED_NAME
+#define _SP SYSTEM_RESERVED_NAME
+#define _UP SYSTEM_RESERVED_NAME
+#define _XD SYSTEM_RESERVED_NAME
diff --git a/clang/test/Headers/x86-intrinsics-headers-clean.cpp b/clang/test/Headers/x86-intrinsics-headers-clean.cpp
index a19207f..0a04bce 100644
--- a/clang/test/Headers/x86-intrinsics-headers-clean.cpp
+++ b/clang/test/Headers/x86-intrinsics-headers-clean.cpp
@@ -10,4 +10,6 @@
 // expected-no-diagnostics
+#include "system_reserved_names.h"
+
 #include <x86intrin.h>
diff --git a/clang/test/Headers/x86-intrinsics-headers.c b/clang/test/Headers/x86-intrinsics-headers.c
index dc06cbd..89a7d4d 100644
--- a/clang/test/Headers/x86-intrinsics-headers.c
+++ b/clang/test/Headers/x86-intrinsics-headers.c
@@ -5,6 +5,8 @@
 // XFAIL: target=arm64ec-pc-windows-msvc
 // These intrinsics are not yet implemented for Arm64EC.
+#include "system_reserved_names.h" + #if defined(i386) || defined(__x86_64__) #ifdef __SSE4_2__ diff --git a/clang/test/Misc/amdgcn.languageOptsOpenCL.cl b/clang/test/Misc/amdgcn.languageOptsOpenCL.cl index 50c78d7..80c0825 100644 --- a/clang/test/Misc/amdgcn.languageOptsOpenCL.cl +++ b/clang/test/Misc/amdgcn.languageOptsOpenCL.cl @@ -8,6 +8,9 @@ // RUN: %clang_cc1 -x cl -cl-std=CL1.2 %s -verify -triple amdgcn-unknown-unknown -Wpedantic-core-features -DTEST_CORE_FEATURES // RUN: %clang_cc1 -x cl -cl-std=CL2.0 %s -verify -triple amdgcn-unknown-unknown -Wpedantic-core-features -DTEST_CORE_FEATURES +// RUN: %clang_cc1 -x cl -cl-std=CL3.0 %s -verify -triple amdgcn-unknown-unknown -Wpedantic-core-features -DTEST_CORE_FEATURES +// RUN: %clang_cc1 -x cl -cl-std=CL3.0 %s -verify -triple amdgcn-unknown-unknown -target-cpu gfx700 -Wpedantic-core-features -DTEST_CORE_FEATURES -DFLAT_SUPPORT + // Extensions in all versions #ifndef cl_clang_storage_class_specifiers #error "Missing cl_clang_storage_class_specifiers define" @@ -156,10 +159,31 @@ #pragma OPENCL EXTENSION cl_amd_media_ops2: enable #if (__OPENCL_C_VERSION__ >= 300) -#ifndef __opencl_c_generic_address_space -#error "Missing __opencl_c_generic_address_space define" -#else -#error "Incorrect __opencl_c_generic_address_space define" + #ifndef __opencl_c_program_scope_global_variables + #error "Missing __opencl_c_program_scope_global_variables define" + #endif #endif -#pragma OPENCL EXTENSION __opencl_c_generic_address_space: enable + +#if (__OPENCL_C_VERSION__ >= 300) + #ifdef FLAT_SUPPORT + #ifndef __opencl_c_generic_address_space + #error "Missing __opencl_c_generic_address_space define" + #endif + #else + #ifdef __opencl_c_generic_address_space + #error "Incorrect __opencl_c_generic_address_space define" + #endif + #endif +#endif + +#if (__OPENCL_C_VERSION__ >= 300) + #ifdef FLAT_SUPPORT + #ifndef __opencl_c_device_enqueue + #error "Missing __opencl_c_device_enqueue define" + #endif + #else + #ifdef __opencl_c_device_enqueue + #error "Incorrect __opencl_c_device_enqueue define" + #endif + #endif #endif diff --git a/clang/test/Modules/pr133057.cpp b/clang/test/Modules/pr133057.cpp new file mode 100644 index 0000000..b273fc3 --- /dev/null +++ b/clang/test/Modules/pr133057.cpp @@ -0,0 +1,143 @@ +// RUN: rm -rf %t +// RUN: mkdir -p %t +// RUN: split-file %s %t +// +// RUN: %clang_cc1 -xc++ -std=c++20 -emit-module -fmodule-name=hf -fno-cxx-modules -fmodules -fno-implicit-modules %t/CMO.cppmap -o %t/WI9.pcm +// RUN: %clang_cc1 -xc++ -std=c++20 -emit-module -fmodule-name=g -fno-cxx-modules -fmodules -fno-implicit-modules -fmodule-file=%t/WI9.pcm %t/E6H.cppmap -o %t/4BK.pcm +// RUN: %clang_cc1 -xc++ -std=c++20 -emit-module -fmodule-name=r -fno-cxx-modules -fmodules -fno-implicit-modules -fmodule-file=%t/WI9.pcm %t/HMT.cppmap -o %t/LUM.pcm +// RUN: %clang_cc1 -xc++ -std=c++20 -emit-module -fmodule-name=q -fno-cxx-modules -fmodules -fno-implicit-modules -fmodule-file=%t/LUM.pcm -fmodule-file=%t/4BK.pcm %t/JOV.cppmap -o %t/9VX.pcm +// RUN: %clang_cc1 -xc++ -std=c++20 -verify -fsyntax-only -fno-cxx-modules -fmodules -fno-implicit-modules -fmodule-file=%t/9VX.pcm %t/XFD.cc + +//--- 2OT.h +#include "LQ1.h" + +namespace ciy { +namespace xqk { +template <typename> +class vum { + public: + using sc = std::C::wmd; + friend bool operator==(vum, vum); +}; +template <typename> +class me { + public: + using vbh = vum<me>; + using sc = std::C::vy<vbh>::sc; + template <typename db> + operator db() { return {}; } +}; +} // namespace xqk +template <typename vus> 
+xqk::me<vus> uvo(std::C::wmd, vus); +} // namespace ciy + +class ua { + std::C::wmd kij() { + ciy::uvo(kij(), '-'); + return {}; + } +}; + +//--- 9KF.h +#include "LQ1.h" +#include "2OT.h" +namespace { +void al(std::C::wmd lou) { std::C::jv<std::C::wmd> yt = ciy::uvo(lou, '/'); } +} // namespace + +//--- CMO.cppmap +module "hf" { +header "LQ1.h" +} + + +//--- E6H.cppmap +module "g" { +export * +header "2OT.h" +} + + +//--- HMT.cppmap +module "r" { +header "2OT.h" +} + + +//--- JOV.cppmap +module "q" { +header "9KF.h" +} + + +//--- LQ1.h +namespace std { +namespace C { +template <class zd> +struct vy : zd {}; +template <class ub> +struct vy<ub*> { + typedef ub jz; +}; +struct wmd {}; +template <class uo, class zt> +void sk(uo k, zt gf) { + (void)(k != gf); +} +template <class uo> +class fm { + public: + fm(uo); +}; +template <class kj, class kju> +bool operator==(kj, kju); +template <class epn> +void afm(epn) { + using yp = vy<epn>; + if (__is_trivially_copyable(yp)) { + sk(fm(epn()), nullptr); + } +} +template <class ub> +class jv { + public: + constexpr void gq(); + ub *nef; +}; +template <class ub> +constexpr void jv<ub>::gq() { + afm(nef); +} +} // namespace C +} // namespace std +namespace ciy { +} // namespace ciy + +//--- XFD.cc +// expected-no-diagnostics +#include "LQ1.h" +#include "2OT.h" +class wiy { + public: + std::C::wmd eyb(); +}; +template <typename wpa> +void i(wpa fg) { + std::C::jv<std::C::wmd> zs; + zs = ciy::uvo(fg.eyb(), '\n'); +} +namespace ciy { +namespace xqk { +struct sbv; +std::C::jv<sbv> ns() { + std::C::jv<sbv> ubs; + ubs.gq(); + return ubs; +} +} // namespace xqk +} // namespace ciy +void s() { + wiy fg; + i(fg); +} diff --git a/clang/test/SemaCUDA/vararg.cu b/clang/test/SemaCUDA/vararg.cu index 0238f42..62693e1 100644 --- a/clang/test/SemaCUDA/vararg.cu +++ b/clang/test/SemaCUDA/vararg.cu @@ -1,11 +1,9 @@ // REQUIRES: x86-registered-target // REQUIRES: nvptx-registered-target // RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fcuda-is-device -fsyntax-only \ -// RUN: -verify -DEXPECT_VA_ARG_ERR -DEXPECT_VARARG_ERR %s +// RUN: -verify -DEXPECT_VA_ARG_ERR %s // RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fcuda-is-device -fsyntax-only \ // RUN: -fcuda-allow-variadic-functions -verify -DEXPECT_VA_ARG_ERR %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fsyntax-only -verify \ -// RUN: -DEXPECT_VARARG_ERR %s #include <stdarg.h> #include "Inputs/cuda.h" @@ -30,28 +28,15 @@ __device__ void baz() { #endif } -__device__ void vararg(const char* x, ...) {} -#ifdef EXPECT_VARARG_ERR -// expected-error@-2 {{CUDA device code does not support variadic functions}} -#endif +__device__ void vararg(const char* x, ...) {} // OK template <typename T> -__device__ void vararg(T t, ...) {} -#ifdef EXPECT_VARARG_ERR -// expected-error@-2 {{CUDA device code does not support variadic functions}} -#endif +__device__ void vararg(T t, ...) {} // OK extern "C" __device__ int printf(const char* fmt, ...); // OK, special case. -// Definition of printf not allowed. -extern "C" __device__ int printf(const char* fmt, ...) { return 0; } -#ifdef EXPECT_VARARG_ERR -// expected-error@-2 {{CUDA device code does not support variadic functions}} -#endif +extern "C" __device__ int printf(const char* fmt, ...) 
{ return 0; } // OK namespace ns { -__device__ int printf(const char* fmt, ...); -#ifdef EXPECT_VARARG_ERR -// expected-error@-2 {{CUDA device code does not support variadic functions}} -#endif +__device__ int printf(const char* fmt, ...); // OK } diff --git a/clang/test/SemaCXX/constant-expression-p2280r4.cpp b/clang/test/SemaCXX/constant-expression-p2280r4.cpp index 78e2e17..5cbfaff 100644 --- a/clang/test/SemaCXX/constant-expression-p2280r4.cpp +++ b/clang/test/SemaCXX/constant-expression-p2280r4.cpp @@ -44,7 +44,7 @@ void splash(Swim& swam) { // nointerpreter-note {{declared here} static_assert(how_many(swam) == 28); // ok static_assert(Swim().lochte() == 12); // ok static_assert(swam.lochte() == 12); // expected-error {{static assertion expression is not an integral constant expression}} \ - // nointerpreter-note {{virtual function called on object 'swam' whose dynamic type is not constant}} + // expected-note {{virtual function called on object 'swam' whose dynamic type is not constant}} static_assert(swam.coughlin == 12); // expected-error {{static assertion expression is not an integral constant expression}} \ // nointerpreter-note {{read of variable 'swam' whose value is not known}} } diff --git a/clang/test/SemaHLSL/Language/AggregateSplatCast-errors.hlsl b/clang/test/SemaHLSL/Language/AggregateSplatCast-errors.hlsl index 2320e13..fbb47bd 100644 --- a/clang/test/SemaHLSL/Language/AggregateSplatCast-errors.hlsl +++ b/clang/test/SemaHLSL/Language/AggregateSplatCast-errors.hlsl @@ -13,12 +13,6 @@ struct R { }; }; -// casting types which contain bitfields is not yet supported. -export void cantCast() { - S s = (S)1; - // expected-error@-1 {{no matching conversion for C-style cast from 'int' to 'S'}} -} - // Can't cast a union export void cantCast2() { R r = (R)1; diff --git a/clang/test/SemaHLSL/Language/ElementwiseCast-errors.hlsl b/clang/test/SemaHLSL/Language/ElementwiseCast-errors.hlsl index 3059150..d9f50e9 100644 --- a/clang/test/SemaHLSL/Language/ElementwiseCast-errors.hlsl +++ b/clang/test/SemaHLSL/Language/ElementwiseCast-errors.hlsl @@ -7,27 +7,6 @@ export void cantCast() { // expected-error@-1 {{C-style cast from 'int[3]' to 'int[4]' is not allowed}} } -struct S { -// expected-note@-1 {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'int2' (aka 'vector<int, 2>') to 'const S' for 1st argument}} -// expected-note@-2 {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'int2' (aka 'vector<int, 2>') to 'S' for 1st argument}} -// expected-note@-3 {{candidate constructor (the implicit default constructor) not viable: requires 0 arguments, but 1 was provided}} - int A : 8; - int B; -}; - -// casting types which contain bitfields is not yet supported. -export void cantCast2() { - S s = {1,2}; - int2 C = (int2)s; - // expected-error@-1 {{cannot convert 'S' to 'int2' (aka 'vector<int, 2>') without a conversion operator}} -} - -export void cantCast3() { - int2 C = {1,2}; - S s = (S)C; - // expected-error@-1 {{no matching conversion for C-style cast from 'int2' (aka 'vector<int, 2>') to 'S'}} -} - struct R { // expected-note@-1 {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'int2' (aka 'vector<int, 2>') to 'const R' for 1st argument}} // expected-note@-2 {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'int2' (aka 'vector<int, 2>') to 'R' for 1st argument}} |
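
A minimal, self-contained C sketch (not part of the patch above) of the push/pop discipline that the new system_reserved_names.h test enforces: a header that must use a common name can save the user's macro with #pragma push_macro, #undef it around its own declarations, and restore it with #pragma pop_macro. The helper __hdr_min and the macro body user_min_macro are hypothetical stand-ins; the pragmas themselves are real and supported by Clang, GCC, and MSVC.

  /* User code defines a popular name before including any header. */
  #define min user_min_macro

  /* What a robust header does internally around its own use of the name: */
  #pragma push_macro("min")  /* save the user's definition */
  #undef min                 /* the identifier is now safe to use */
  static inline int __hdr_min(int __a, int __b) { return __a < __b ? __a : __b; }
  #pragma pop_macro("min")   /* restore the user's definition */

  /* After the header, the user's macro must still be defined. */
  #ifndef min
  #error "the header swallowed the user's 'min' macro"
  #endif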