102 files changed, 2883 insertions, 1831 deletions
diff --git a/clang/include/clang/Basic/BuiltinsNVPTX.td b/clang/include/clang/Basic/BuiltinsNVPTX.td index 2d6fa17..d923d2a 100644 --- a/clang/include/clang/Basic/BuiltinsNVPTX.td +++ b/clang/include/clang/Basic/BuiltinsNVPTX.td @@ -579,11 +579,35 @@ def __nvvm_ff2bf16x2_rn : NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(float, float) def __nvvm_ff2bf16x2_rn_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(float, float)", SM_80, PTX70>; def __nvvm_ff2bf16x2_rz : NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(float, float)", SM_80, PTX70>; def __nvvm_ff2bf16x2_rz_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(float, float)", SM_80, PTX70>; +def __nvvm_ff2bf16x2_rs : + NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(float, float, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; +def __nvvm_ff2bf16x2_rs_relu : + NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(float, float, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; +def __nvvm_ff2bf16x2_rs_satfinite : + NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(float, float, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; +def __nvvm_ff2bf16x2_rs_relu_satfinite : + NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(float, float, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; def __nvvm_ff2f16x2_rn : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(float, float)", SM_80, PTX70>; def __nvvm_ff2f16x2_rn_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(float, float)", SM_80, PTX70>; def __nvvm_ff2f16x2_rz : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(float, float)", SM_80, PTX70>; def __nvvm_ff2f16x2_rz_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(float, float)", SM_80, PTX70>; +def __nvvm_ff2f16x2_rs : + NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(float, float, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; +def __nvvm_ff2f16x2_rs_relu : + NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(float, float, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; +def __nvvm_ff2f16x2_rs_satfinite : + NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(float, float, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; +def __nvvm_ff2f16x2_rs_relu_satfinite : + NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(float, float, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; def __nvvm_f2bf16_rn : NVPTXBuiltinSMAndPTX<"__bf16(float)", SM_80, PTX70>; def __nvvm_f2bf16_rn_relu : NVPTXBuiltinSMAndPTX<"__bf16(float)", SM_80, PTX70>; @@ -616,6 +640,19 @@ def __nvvm_e4m3x2_to_f16x2_rn_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(sh def __nvvm_e5m2x2_to_f16x2_rn : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(short)", SM_89, PTX81>; def __nvvm_e5m2x2_to_f16x2_rn_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(short)", SM_89, PTX81>; +def __nvvm_f32x4_to_e4m3x4_rs_satfinite : + NVPTXBuiltinSMAndPTX<"_Vector<4, char>(_Vector<4, float>, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; +def __nvvm_f32x4_to_e4m3x4_rs_relu_satfinite : + NVPTXBuiltinSMAndPTX<"_Vector<4, char>(_Vector<4, float>, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; +def __nvvm_f32x4_to_e5m2x4_rs_satfinite : + NVPTXBuiltinSMAndPTX<"_Vector<4, char>(_Vector<4, float>, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; +def __nvvm_f32x4_to_e5m2x4_rs_relu_satfinite : + NVPTXBuiltinSMAndPTX<"_Vector<4, char>(_Vector<4, float>, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; + def __nvvm_ff_to_e2m3x2_rn_satfinite : NVPTXBuiltinSMAndPTX<"short(float, float)", SM<"100a", [SM_101a, SM_120a]>, PTX86>; def __nvvm_ff_to_e2m3x2_rn_relu_satfinite : NVPTXBuiltinSMAndPTX<"short(float, float)", SM<"100a", [SM_101a, SM_120a]>, PTX86>; def __nvvm_ff_to_e3m2x2_rn_satfinite : NVPTXBuiltinSMAndPTX<"short(float, float)", SM<"100a", [SM_101a, SM_120a]>, 
PTX86>; @@ -626,12 +663,32 @@ def __nvvm_e2m3x2_to_f16x2_rn_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(sh def __nvvm_e3m2x2_to_f16x2_rn : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(short)", SM<"100a", [SM_101a, SM_120a]>, PTX86>; def __nvvm_e3m2x2_to_f16x2_rn_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(short)", SM<"100a", [SM_101a, SM_120a]>, PTX86>; +def __nvvm_f32x4_to_e2m3x4_rs_satfinite : + NVPTXBuiltinSMAndPTX<"_Vector<4, char>(_Vector<4, float>, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; +def __nvvm_f32x4_to_e2m3x4_rs_relu_satfinite : + NVPTXBuiltinSMAndPTX<"_Vector<4, char>(_Vector<4, float>, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; +def __nvvm_f32x4_to_e3m2x4_rs_satfinite : + NVPTXBuiltinSMAndPTX<"_Vector<4, char>(_Vector<4, float>, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; +def __nvvm_f32x4_to_e3m2x4_rs_relu_satfinite : + NVPTXBuiltinSMAndPTX<"_Vector<4, char>(_Vector<4, float>, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; + def __nvvm_ff_to_e2m1x2_rn_satfinite : NVPTXBuiltinSMAndPTX<"short(float, float)", SM<"100a", [SM_101a, SM_120a]>, PTX86>; def __nvvm_ff_to_e2m1x2_rn_relu_satfinite : NVPTXBuiltinSMAndPTX<"short(float, float)", SM<"100a", [SM_101a, SM_120a]>, PTX86>; def __nvvm_e2m1x2_to_f16x2_rn : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(short)", SM<"100a", [SM_101a, SM_120a]>, PTX86>; def __nvvm_e2m1x2_to_f16x2_rn_relu : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(short)", SM<"100a", [SM_101a, SM_120a]>, PTX86>; +def __nvvm_f32x4_to_e2m1x4_rs_satfinite : + NVPTXBuiltinSMAndPTX<"short(_Vector<4, float>, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; +def __nvvm_f32x4_to_e2m1x4_rs_relu_satfinite : + NVPTXBuiltinSMAndPTX<"short(_Vector<4, float>, uint32_t)", + SM<"100a", [SM_103a]>, PTX87>; + def __nvvm_ff_to_ue8m0x2_rz : NVPTXBuiltinSMAndPTX<"short(float, float)", SM<"100a", [SM_101a, SM_120a]>, PTX86>; def __nvvm_ff_to_ue8m0x2_rz_satfinite : NVPTXBuiltinSMAndPTX<"short(float, float)", SM<"100a", [SM_101a, SM_120a]>, PTX86>; def __nvvm_ff_to_ue8m0x2_rp : NVPTXBuiltinSMAndPTX<"short(float, float)", SM<"100a", [SM_101a, SM_120a]>, PTX86>; diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def index 84f5ab3..9e85008 100644 --- a/clang/include/clang/Basic/LangOptions.def +++ b/clang/include/clang/Basic/LangOptions.def @@ -245,7 +245,6 @@ LANGOPT(HLSLStrictAvailability, 1, 0, NotCompatible, LANGOPT(HLSLSpvUseUnknownImageFormat, 1, 0, NotCompatible, "For storage images and texel buffers, sets the default format to 'Unknown' when not specified via the `vk::image_format` attribute. 
If this option is not used, the format is inferred from the resource's data type.") LANGOPT(CUDAIsDevice , 1, 0, NotCompatible, "compiling for CUDA device") -LANGOPT(CUDAAllowVariadicFunctions, 1, 0, NotCompatible, "allowing variadic functions in CUDA device code") LANGOPT(CUDAHostDeviceConstexpr, 1, 1, NotCompatible, "treating unattributed constexpr functions as __host__ __device__") LANGOPT(GPUDeviceApproxTranscendentals, 1, 0, NotCompatible, "using approximate transcendental functions") LANGOPT(GPURelocatableDeviceCode, 1, 0, NotCompatible, "generate relocatable device code") diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 5a48f0b..9bfa1dd 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -8733,8 +8733,7 @@ def fcuda_include_gpubinary : Separate<["-"], "fcuda-include-gpubinary">, HelpText<"Incorporate CUDA device-side binary into host object file.">, MarshallingInfoString<CodeGenOpts<"CudaGpuBinaryFileName">>; def fcuda_allow_variadic_functions : Flag<["-"], "fcuda-allow-variadic-functions">, - HelpText<"Allow variadic functions in CUDA device code.">, - MarshallingInfoFlag<LangOpts<"CUDAAllowVariadicFunctions">>; + HelpText<"Deprecated; Allow variadic functions in CUDA device code.">; def fno_cuda_host_device_constexpr : Flag<["-"], "fno-cuda-host-device-constexpr">, HelpText<"Don't treat unattributed constexpr functions as __host__ __device__.">, MarshallingInfoNegativeFlag<LangOpts<"CUDAHostDeviceConstexpr">>; diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp index 68ebfdf..a3c4ba5 100644 --- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp +++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp @@ -736,25 +736,6 @@ static bool interp__builtin_expect(InterpState &S, CodePtr OpPC, return true; } -/// rotateleft(value, amount) -static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC, - const InterpFrame *Frame, - const CallExpr *Call, bool Right) { - APSInt Amount = popToAPSInt(S, Call->getArg(1)); - APSInt Value = popToAPSInt(S, Call->getArg(0)); - - APSInt Result; - if (Right) - Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())), - /*IsUnsigned=*/true); - else // Left. 
- Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())), - /*IsUnsigned=*/true); - - pushInteger(S, Result, Call->getType()); - return true; -} - static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { @@ -3160,7 +3141,10 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, case Builtin::BI_rotl: case Builtin::BI_lrotl: case Builtin::BI_rotl64: - return interp__builtin_rotate(S, OpPC, Frame, Call, /*Right=*/false); + return interp__builtin_elementwise_int_binop( + S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) -> APInt { + return Value.rotl(Amount); + }); case Builtin::BI__builtin_rotateright8: case Builtin::BI__builtin_rotateright16: @@ -3171,7 +3155,10 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, case Builtin::BI_rotr: case Builtin::BI_lrotr: case Builtin::BI_rotr64: - return interp__builtin_rotate(S, OpPC, Frame, Call, /*Right=*/true); + return interp__builtin_elementwise_int_binop( + S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) -> APInt { + return Value.rotr(Amount); + }); case Builtin::BI__builtin_ffs: case Builtin::BI__builtin_ffsl: diff --git a/clang/lib/Headers/avx512vlintrin.h b/clang/lib/Headers/avx512vlintrin.h index 754f43a..965741f 100644 --- a/clang/lib/Headers/avx512vlintrin.h +++ b/clang/lib/Headers/avx512vlintrin.h @@ -7330,9 +7330,8 @@ _mm256_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) __builtin_ia32_pmovusqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_cvtepi32_epi8 (__m128i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_cvtepi32_epi8(__m128i __A) { return (__m128i)__builtin_shufflevector( __builtin_convertvector((__v4si)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7); @@ -7360,9 +7359,8 @@ _mm_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) __builtin_ia32_pmovdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi32_epi8 (__m256i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_cvtepi32_epi8(__m256i __A) { return (__m128i)__builtin_shufflevector( __builtin_convertvector((__v8si)__A, __v8qi), (__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, @@ -7370,8 +7368,7 @@ _mm256_cvtepi32_epi8 (__m256i __A) } static __inline__ __m128i __DEFAULT_FN_ATTRS256 -_mm256_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A) -{ +_mm256_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A, (__v16qi) __O, __M); } @@ -7390,9 +7387,8 @@ _mm256_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) __builtin_ia32_pmovdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_cvtepi32_epi16 (__m128i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_cvtepi32_epi16(__m128i __A) { return (__m128i)__builtin_shufflevector( __builtin_convertvector((__v4si)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7); @@ -7419,9 +7415,8 @@ _mm_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) __builtin_ia32_pmovdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi32_epi16 (__m256i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR 
+_mm256_cvtepi32_epi16(__m256i __A) { return (__m128i)__builtin_convertvector((__v8si)__A, __v8hi); } @@ -7446,9 +7441,8 @@ _mm256_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) __builtin_ia32_pmovdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_cvtepi64_epi8 (__m128i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_cvtepi64_epi8(__m128i __A) { return (__m128i)__builtin_shufflevector( __builtin_convertvector((__v2di)__A, __v2qi), (__v2qi){0, 0}, 0, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3); @@ -7475,9 +7469,8 @@ _mm_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) __builtin_ia32_pmovqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi64_epi8 (__m256i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_cvtepi64_epi8(__m256i __A) { return (__m128i)__builtin_shufflevector( __builtin_convertvector((__v4di)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7); @@ -7504,9 +7497,8 @@ _mm256_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) __builtin_ia32_pmovqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_cvtepi64_epi32 (__m128i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_cvtepi64_epi32(__m128i __A) { return (__m128i)__builtin_shufflevector( __builtin_convertvector((__v2di)__A, __v2si), (__v2si){0, 0}, 0, 1, 2, 3); } @@ -7532,23 +7524,20 @@ _mm_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A) __builtin_ia32_pmovqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi64_epi32 (__m256i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_cvtepi64_epi32(__m256i __A) { return (__m128i)__builtin_convertvector((__v4di)__A, __v4si); } -static __inline__ __m128i __DEFAULT_FN_ATTRS256 -_mm256_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, (__v4si)_mm256_cvtepi64_epi32(__A), (__v4si)__O); } -static __inline__ __m128i __DEFAULT_FN_ATTRS256 -_mm256_maskz_cvtepi64_epi32 (__mmask8 __M, __m256i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A) { return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, (__v4si)_mm256_cvtepi64_epi32(__A), (__v4si)_mm_setzero_si128()); @@ -7560,9 +7549,8 @@ _mm256_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A) __builtin_ia32_pmovqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_cvtepi64_epi16 (__m128i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_cvtepi64_epi16(__m128i __A) { return (__m128i)__builtin_shufflevector( __builtin_convertvector((__v2di)__A, __v2hi), (__v2hi){0, 0}, 0, 1, 2, 3, 3, 3, 3, 3); @@ -7590,9 +7578,8 @@ _mm_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) __builtin_ia32_pmovqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M); } -static __inline__ __m128i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi64_epi16 (__m256i __A) -{ +static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_cvtepi64_epi16(__m256i __A) { return 
(__m128i)__builtin_shufflevector( __builtin_convertvector((__v4di)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7); diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp index 0069b08..6eaf7b9 100644 --- a/clang/lib/Sema/SemaDecl.cpp +++ b/clang/lib/Sema/SemaDecl.cpp @@ -11041,17 +11041,6 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, << CUDA().getConfigureFuncName(); Context.setcudaConfigureCallDecl(NewFD); } - - // Variadic functions, other than a *declaration* of printf, are not allowed - // in device-side CUDA code, unless someone passed - // -fcuda-allow-variadic-functions. - if (!getLangOpts().CUDAAllowVariadicFunctions && NewFD->isVariadic() && - (NewFD->hasAttr<CUDADeviceAttr>() || - NewFD->hasAttr<CUDAGlobalAttr>()) && - !(II && II->isStr("printf") && NewFD->isExternC() && - !D.isFunctionDefinition())) { - Diag(NewFD->getLocation(), diag::err_variadic_device_fn); - } } MarkUnusedFileScopedDecl(NewFD); diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp index b870114..5657dfe 100644 --- a/clang/lib/Sema/SemaOverload.cpp +++ b/clang/lib/Sema/SemaOverload.cpp @@ -4413,14 +4413,23 @@ CompareImplicitConversionSequences(Sema &S, SourceLocation Loc, Result = CompareStandardConversionSequences(S, Loc, ICS1.Standard, ICS2.Standard); else if (ICS1.isUserDefined()) { + // With lazy template loading, it is possible to find non-canonical + // FunctionDecls, depending on when redecl chains are completed. Make sure + // to compare the canonical decls of conversion functions. This avoids + // ambiguity problems for templated conversion operators. + const FunctionDecl *ConvFunc1 = ICS1.UserDefined.ConversionFunction; + if (ConvFunc1) + ConvFunc1 = ConvFunc1->getCanonicalDecl(); + const FunctionDecl *ConvFunc2 = ICS2.UserDefined.ConversionFunction; + if (ConvFunc2) + ConvFunc2 = ConvFunc2->getCanonicalDecl(); // User-defined conversion sequence U1 is a better conversion // sequence than another user-defined conversion sequence U2 if // they contain the same user-defined conversion function or // constructor and if the second standard conversion sequence of // U1 is better than the second standard conversion sequence of // U2 (C++ 13.3.3.2p3). 
- if (ICS1.UserDefined.ConversionFunction == - ICS2.UserDefined.ConversionFunction) + if (ConvFunc1 == ConvFunc2) Result = CompareStandardConversionSequences(S, Loc, ICS1.UserDefined.After, ICS2.UserDefined.After); diff --git a/clang/test/CodeGen/X86/avx512vl-builtins.c b/clang/test/CodeGen/X86/avx512vl-builtins.c index 8800623..6d91870 100644 --- a/clang/test/CodeGen/X86/avx512vl-builtins.c +++ b/clang/test/CodeGen/X86/avx512vl-builtins.c @@ -9272,6 +9272,8 @@ __m128i test_mm_cvtepi32_epi8(__m128i __A) { return _mm_cvtepi32_epi8(__A); } +TEST_CONSTEXPR(match_v16qi(_mm_cvtepi32_epi8((__m128i)(__v4si){1, 2, 3, 4}), 1 ,2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); + __m128i test_mm_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A) { // CHECK-LABEL: test_mm_mask_cvtepi32_epi8 // CHECK: @llvm.x86.avx512.mask.pmov.db.128 @@ -9297,6 +9299,8 @@ __m128i test_mm256_cvtepi32_epi8(__m256i __A) { return _mm256_cvtepi32_epi8(__A); } +TEST_CONSTEXPR(match_v16qi(_mm256_cvtepi32_epi8((__m256i)(__v8si){1, 2, 3, 4, 5, 6, 7, 8}), 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0 ,0)); + __m128i test_mm256_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A) { // CHECK-LABEL: test_mm256_mask_cvtepi32_epi8 // CHECK: @llvm.x86.avx512.mask.pmov.db.256 @@ -9322,6 +9326,8 @@ __m128i test_mm_cvtepi32_epi16(__m128i __A) { return _mm_cvtepi32_epi16(__A); } +TEST_CONSTEXPR(match_v8hi(_mm_cvtepi32_epi16((__m128i)(__v4si){1, 2, 3, 4}), 1 ,2, 3, 4, 0, 0, 0, 0)); + __m128i test_mm_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A) { // CHECK-LABEL: test_mm_mask_cvtepi32_epi16 // CHECK: @llvm.x86.avx512.mask.pmov.dw.128 @@ -9346,6 +9352,8 @@ __m128i test_mm256_cvtepi32_epi16(__m256i __A) { return _mm256_cvtepi32_epi16(__A); } +TEST_CONSTEXPR(match_v8hi(_mm256_cvtepi32_epi16((__m256i)(__v8si){1, 2, 3, 4, 5, 6, 7, 8}), 1, 2, 3, 4, 5, 6, 7, 8)); + __m128i test_mm256_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A) { // CHECK-LABEL: test_mm256_mask_cvtepi32_epi16 // CHECK: @llvm.x86.avx512.mask.pmov.dw.256 @@ -9371,6 +9379,8 @@ __m128i test_mm_cvtepi64_epi8(__m128i __A) { return _mm_cvtepi64_epi8(__A); } +TEST_CONSTEXPR(match_v16qi(_mm_cvtepi64_epi8((__m128i)(__v2di){1, 2}), 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); + __m128i test_mm_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A) { // CHECK-LABEL: test_mm_mask_cvtepi64_epi8 // CHECK: @llvm.x86.avx512.mask.pmov.qb.128 @@ -9396,6 +9406,8 @@ __m128i test_mm256_cvtepi64_epi8(__m256i __A) { return _mm256_cvtepi64_epi8(__A); } +TEST_CONSTEXPR(match_v16qi(_mm256_cvtepi64_epi8((__m256i)(__v4di){1, 2, 3, 4}), 1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); + __m128i test_mm256_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A) { // CHECK-LABEL: test_mm256_mask_cvtepi64_epi8 // CHECK: @llvm.x86.avx512.mask.pmov.qb.256 @@ -9421,6 +9433,8 @@ __m128i test_mm_cvtepi64_epi32(__m128i __A) { return _mm_cvtepi64_epi32(__A); } +TEST_CONSTEXPR(match_v4si(_mm_cvtepi64_epi32((__m128i)(__v2di){1, 2}),1, 2, 0, 0)); + __m128i test_mm_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A) { // CHECK-LABEL: test_mm_mask_cvtepi64_epi32 // CHECK: @llvm.x86.avx512.mask.pmov.qd.128 @@ -9445,6 +9459,8 @@ __m128i test_mm256_cvtepi64_epi32(__m256i __A) { return _mm256_cvtepi64_epi32(__A); } +TEST_CONSTEXPR(match_v4si(_mm256_cvtepi64_epi32((__m256i)(__v4di){1 ,2 ,3 ,4}), 1, 2, 3, 4)); + __m128i test_mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) { // CHECK-LABEL: test_mm256_mask_cvtepi64_epi32 // CHECK: trunc <4 x i64> %{{.*}} 
to <4 x i32> @@ -9452,6 +9468,8 @@ __m128i test_mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A) { return _mm256_mask_cvtepi64_epi32(__O, __M, __A); } +TEST_CONSTEXPR(match_v4si(_mm256_mask_cvtepi64_epi32(_mm_set1_epi32(-777), 0xA,(__m256i)(__v4di){1, -2, 3, -4}), -777, -2, -777, -4)); + __m128i test_mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A) { // CHECK-LABEL: test_mm256_maskz_cvtepi64_epi32 // CHECK: trunc <4 x i64> %{{.*}} to <4 x i32> @@ -9459,6 +9477,8 @@ __m128i test_mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A) { return _mm256_maskz_cvtepi64_epi32(__M, __A); } +TEST_CONSTEXPR(match_v4si(_mm256_maskz_cvtepi64_epi32( 0xA,(__m256i)(__v4di){1, -2, 3, -4}),0 , -2, 0, -4)); + void test_mm256_mask_cvtepi64_storeu_epi32(void * __P, __mmask8 __M, __m256i __A) { // CHECK-LABEL: test_mm256_mask_cvtepi64_storeu_epi32 // CHECK: @llvm.x86.avx512.mask.pmov.qd.mem.256 @@ -9472,6 +9492,8 @@ __m128i test_mm_cvtepi64_epi16(__m128i __A) { return _mm_cvtepi64_epi16(__A); } +TEST_CONSTEXPR(match_v8hi(_mm_cvtepi64_epi16((__m128i)(__v2di){1, 2}),1, 2, 0, 0, 0, 0, 0, 0)); + __m128i test_mm_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A) { // CHECK-LABEL: test_mm_mask_cvtepi64_epi16 // CHECK: @llvm.x86.avx512.mask.pmov.qw.128 @@ -9497,6 +9519,8 @@ __m128i test_mm256_cvtepi64_epi16(__m256i __A) { return _mm256_cvtepi64_epi16(__A); } +TEST_CONSTEXPR(match_v8hi(_mm256_cvtepi64_epi16((__m256i)(__v4di){1 ,2, 3, 4}),1, 2, 3, 4, 0, 0, 0, 0)); + __m128i test_mm256_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A) { // CHECK-LABEL: test_mm256_mask_cvtepi64_epi16 // CHECK: @llvm.x86.avx512.mask.pmov.qw.256 diff --git a/clang/test/CodeGen/builtins-nvptx.c b/clang/test/CodeGen/builtins-nvptx.c index f994adb..e3be262 100644 --- a/clang/test/CodeGen/builtins-nvptx.c +++ b/clang/test/CodeGen/builtins-nvptx.c @@ -43,6 +43,12 @@ // RUN: %clang_cc1 -ffp-contract=off -triple nvptx64-unknown-unknown -target-cpu sm_120a -target-feature +ptx86 -DPTX=86 \ // RUN: -disable-llvm-optzns -fcuda-is-device -emit-llvm -o - -x cuda %s \ // RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX86_SM120a %s +// RUN: %clang_cc1 -ffp-contract=off -triple nvptx64-unknown-unknown -target-cpu sm_103a -target-feature +ptx87 -DPTX=87 \ +// RUN: -disable-llvm-optzns -fcuda-is-device -emit-llvm -o - -x cuda %s \ +// RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX87_SM103a %s +// RUN: %clang_cc1 -ffp-contract=off -triple nvptx64-unknown-unknown -target-cpu sm_100a -target-feature +ptx87 -DPTX=87 \ +// RUN: -disable-llvm-optzns -fcuda-is-device -emit-llvm -o - -x cuda %s \ +// RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX87_SM100a %s // ### The last run to check with the highest SM and PTX version available // ### to make sure target builtins are still accepted. 
// RUN: %clang_cc1 -ffp-contract=off -triple nvptx64-unknown-unknown -target-cpu sm_120a -target-feature +ptx87 -DPTX=87 \ @@ -1203,6 +1209,123 @@ __device__ void nvvm_cvt_sm100a_sm101a_sm120a() { // CHECK: ret void } +__device__ void nvvm_cvt_sm100a_sm103a() { +#if (PTX >= 87) && (__CUDA_ARCH_FEAT_SM100_ALL || __CUDA_ARCH_FEAT_SM103_ALL) + + typedef __fp16 f16x2 __attribute__((ext_vector_type(2))); + typedef __bf16 bf16x2 __attribute__((ext_vector_type(2))); + typedef char uint8x4 __attribute__((ext_vector_type(4))); + +// CHECK_PTX87_SM100a: %[[R1:.*]] = call <2 x half> @llvm.nvvm.ff2f16x2.rs(float 1.000000e+00, float 1.000000e+00, i32 0) +// CHECK_PTX87_SM100a: store <2 x half> %[[R1]], ptr %r1 +// CHECK_PTX87_SM103a: %[[R1:.*]] = call <2 x half> @llvm.nvvm.ff2f16x2.rs(float 1.000000e+00, float 1.000000e+00, i32 0) +// CHECK_PTX87_SM103a: store <2 x half> %[[R1]], ptr %r1 + f16x2 r1 = __nvvm_ff2f16x2_rs(1.0f, 1.0f, 0); + +// CHECK_PTX87_SM100a: %[[R2:.*]] = call <2 x half> @llvm.nvvm.ff2f16x2.rs.relu(float 1.000000e+00, float 1.000000e+00, i32 0) +// CHECK_PTX87_SM100a: store <2 x half> %[[R2]], ptr %r2 +// CHECK_PTX87_SM103a: %[[R2:.*]] = call <2 x half> @llvm.nvvm.ff2f16x2.rs.relu(float 1.000000e+00, float 1.000000e+00, i32 0) +// CHECK_PTX87_SM103a: store <2 x half> %[[R2]], ptr %r2 + f16x2 r2 = __nvvm_ff2f16x2_rs_relu(1.0f, 1.0f, 0); + +// CHECK_PTX87_SM100a: %[[R3:.*]] = call <2 x half> @llvm.nvvm.ff2f16x2.rs.satfinite(float 1.000000e+00, float 1.000000e+00, i32 0) +// CHECK_PTX87_SM100a: store <2 x half> %[[R3]], ptr %r3 +// CHECK_PTX87_SM103a: %[[R3:.*]] = call <2 x half> @llvm.nvvm.ff2f16x2.rs.satfinite(float 1.000000e+00, float 1.000000e+00, i32 0) +// CHECK_PTX87_SM103a: store <2 x half> %[[R3]], ptr %r3 + f16x2 r3 = __nvvm_ff2f16x2_rs_satfinite(1.0f, 1.0f, 0); + +// CHECK_PTX87_SM100a: %[[R4:.*]] = call <2 x half> @llvm.nvvm.ff2f16x2.rs.relu.satfinite(float 1.000000e+00, float 1.000000e+00, i32 0) +// CHECK_PTX87_SM100a: store <2 x half> %[[R4]], ptr %r4 +// CHECK_PTX87_SM103a: %[[R4:.*]] = call <2 x half> @llvm.nvvm.ff2f16x2.rs.relu.satfinite(float 1.000000e+00, float 1.000000e+00, i32 0) +// CHECK_PTX87_SM103a: store <2 x half> %[[R4]], ptr %r4 + f16x2 r4 = __nvvm_ff2f16x2_rs_relu_satfinite(1.0f, 1.0f, 0); + +// CHECK_PTX87_SM100a: %[[R5:.*]] = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs(float 1.000000e+00, float 1.000000e+00, i32 0) +// CHECK_PTX87_SM100a: store <2 x bfloat> %[[R5]], ptr %r5 +// CHECK_PTX87_SM103a: %[[R5:.*]] = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs(float 1.000000e+00, float 1.000000e+00, i32 0) +// CHECK_PTX87_SM103a: store <2 x bfloat> %[[R5]], ptr %r5 + bf16x2 r5 = __nvvm_ff2bf16x2_rs(1.0f, 1.0f, 0); + +// CHECK_PTX87_SM100a: %[[R6:.*]] = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.relu(float 1.000000e+00, float 1.000000e+00, i32 0) +// CHECK_PTX87_SM100a: store <2 x bfloat> %[[R6]], ptr %r6 +// CHECK_PTX87_SM103a: %[[R6:.*]] = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.relu(float 1.000000e+00, float 1.000000e+00, i32 0) +// CHECK_PTX87_SM103a: store <2 x bfloat> %[[R6]], ptr %r6 + bf16x2 r6 = __nvvm_ff2bf16x2_rs_relu(1.0f, 1.0f, 0); + +// CHECK_PTX87_SM100a: %[[R7:.*]] = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.satfinite(float 1.000000e+00, float 1.000000e+00, i32 0) +// CHECK_PTX87_SM100a: store <2 x bfloat> %[[R7]], ptr %r7 +// CHECK_PTX87_SM103a: %[[R7:.*]] = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.satfinite(float 1.000000e+00, float 1.000000e+00, i32 0) +// CHECK_PTX87_SM103a: store <2 x bfloat> %[[R7]], ptr %r7 + bf16x2 r7 = 
__nvvm_ff2bf16x2_rs_satfinite(1.0f, 1.0f, 0); + +// CHECK_PTX87_SM100a: %[[R8:.*]] = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.relu.satfinite(float 1.000000e+00, float 1.000000e+00, i32 0) +// CHECK_PTX87_SM100a: store <2 x bfloat> %[[R8]], ptr %r8 +// CHECK_PTX87_SM103a: %[[R8:.*]] = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.relu.satfinite(float 1.000000e+00, float 1.000000e+00, i32 0) +// CHECK_PTX87_SM103a: store <2 x bfloat> %[[R8]], ptr %r8 + bf16x2 r8 = __nvvm_ff2bf16x2_rs_relu_satfinite(1.0f, 1.0f, 0); + +// CHECK_PTX87_SM100a: %[[R9:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e4m3x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM100a: store <4 x i8> %[[R9]], ptr %r9 +// CHECK_PTX87_SM103a: %[[R9:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e4m3x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM103a: store <4 x i8> %[[R9]], ptr %r9 + uint8x4 r9 = __nvvm_f32x4_to_e4m3x4_rs_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0); + +// CHECK_PTX87_SM100a: %[[R10:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e4m3x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM100a: store <4 x i8> %[[R10]], ptr %r10 +// CHECK_PTX87_SM103a: %[[R10:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e4m3x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM103a: store <4 x i8> %[[R10]], ptr %r10 + uint8x4 r10 = __nvvm_f32x4_to_e4m3x4_rs_relu_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0); + +// CHECK_PTX87_SM100a: %[[R11:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e5m2x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM100a: store <4 x i8> %[[R11]], ptr %r11 +// CHECK_PTX87_SM103a: %[[R11:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e5m2x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM103a: store <4 x i8> %[[R11]], ptr %r11 + uint8x4 r11 = __nvvm_f32x4_to_e5m2x4_rs_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0); + +// CHECK_PTX87_SM100a: %[[R12:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e5m2x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM100a: store <4 x i8> %[[R12]], ptr %r12 +// CHECK_PTX87_SM103a: %[[R12:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e5m2x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM103a: store <4 x i8> %[[R12]], ptr %r12 + uint8x4 r12 = __nvvm_f32x4_to_e5m2x4_rs_relu_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0); + +// CHECK_PTX87_SM100a: %[[R13:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e2m3x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM100a: store <4 x i8> %[[R13]], ptr %r13 +// CHECK_PTX87_SM103a: %[[R13:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e2m3x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM103a: store <4 x i8> %[[R13]], ptr %r13 + uint8x4 r13 = __nvvm_f32x4_to_e2m3x4_rs_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0); + +// CHECK_PTX87_SM100a: %[[R14:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e2m3x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM100a: store <4 x i8> %[[R14]], ptr %r14 +// CHECK_PTX87_SM103a: %[[R14:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e2m3x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM103a: store <4 x i8> %[[R14]], ptr %r14 + uint8x4 r14 = __nvvm_f32x4_to_e2m3x4_rs_relu_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0); + +// CHECK_PTX87_SM100a: %[[R15:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e3m2x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), 
i32 0) +// CHECK_PTX87_SM100a: store <4 x i8> %[[R15]], ptr %r15 +// CHECK_PTX87_SM103a: %[[R15:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e3m2x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM103a: store <4 x i8> %[[R15]], ptr %r15 + uint8x4 r15 = __nvvm_f32x4_to_e3m2x4_rs_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0); + +// CHECK_PTX87_SM100a: %[[R16:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e3m2x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM100a: store <4 x i8> %[[R16]], ptr %r16 +// CHECK_PTX87_SM103a: %[[R16:.*]] = call <4 x i8> @llvm.nvvm.f32x4.to.e3m2x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM103a: store <4 x i8> %[[R16]], ptr %r16 + uint8x4 r16 = __nvvm_f32x4_to_e3m2x4_rs_relu_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0); + +// CHECK_PTX87_SM100a: %[[R17:.*]] = call i16 @llvm.nvvm.f32x4.to.e2m1x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM100a: store i16 %[[R17]], ptr %r17 +// CHECK_PTX87_SM103a: %[[R17:.*]] = call i16 @llvm.nvvm.f32x4.to.e2m1x4.rs.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM103a: store i16 %[[R17]], ptr %r17 + short r17 = __nvvm_f32x4_to_e2m1x4_rs_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0); + +// CHECK_PTX87_SM100a: %[[R18:.*]] = call i16 @llvm.nvvm.f32x4.to.e2m1x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM100a: store i16 %[[R18]], ptr %r18 +// CHECK_PTX87_SM103a: %[[R18:.*]] = call i16 @llvm.nvvm.f32x4.to.e2m1x4.rs.relu.satfinite(<4 x float> splat (float 1.000000e+00), i32 0) +// CHECK_PTX87_SM103a: store i16 %[[R18]], ptr %r18 + short r18 = __nvvm_f32x4_to_e2m1x4_rs_relu_satfinite({1.0f, 1.0f, 1.0f, 1.0f}, 0); +#endif +} + #define NAN32 0x7FBFFFFF #define NAN16 (__bf16)0x7FBF #define BF16 (__bf16)0.1f diff --git a/clang/test/CodeGenOpenCL/amdgpu-features.cl b/clang/test/CodeGenOpenCL/amdgpu-features.cl index af1ef64..c0c22bc 100644 --- a/clang/test/CodeGenOpenCL/amdgpu-features.cl +++ b/clang/test/CodeGenOpenCL/amdgpu-features.cl @@ -109,8 +109,8 @@ // GFX1153: "target-features"="+16-bit-insts,+atomic-fadd-rtn-insts,+atomic-fmin-fmax-global-f32,+ci-insts,+dl-insts,+dot10-insts,+dot12-insts,+dot5-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32" // GFX1200: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot10-insts,+dot11-insts,+dot12-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+fp8-conversion-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32" // GFX1201: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot10-insts,+dot11-insts,+dot12-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+fp8-conversion-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32" -// GFX1250: 
"target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32" -// GFX1251: "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32" +// GFX1250: "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+cluster,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32" +// GFX1251: "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+cluster,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32" // GFX1103-W64: "target-features"="+16-bit-insts,+atomic-fadd-rtn-insts,+atomic-fmin-fmax-global-f32,+ci-insts,+dl-insts,+dot10-insts,+dot12-insts,+dot5-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize64" diff --git a/clang/test/Headers/arm-acle-header.c b/clang/test/Headers/arm-acle-header.c index fea8472..58fcc66 100644 --- a/clang/test/Headers/arm-acle-header.c +++ b/clang/test/Headers/arm-acle-header.c @@ -10,6 +10,8 @@ // RUN: %clang_cc1 -x c++ -triple arm64ec-windows -target-cpu cortex-a53 -fsyntax-only -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=19.11 %s // expected-no-diagnostics +#include "system_reserved_names.h" + #include <arm_acle.h> #ifdef _MSC_VER #include 
<intrin.h> diff --git a/clang/test/Headers/arm-cde-header.c b/clang/test/Headers/arm-cde-header.c index 1f60368..526202a 100644 --- a/clang/test/Headers/arm-cde-header.c +++ b/clang/test/Headers/arm-cde-header.c @@ -9,5 +9,7 @@ // Check that the headers don't conflict with each other +#include "system_reserved_names.h" + #include <arm_cde.h> #include <arm_mve.h> diff --git a/clang/test/Headers/arm-cmse-header.c b/clang/test/Headers/arm-cmse-header.c index 862572d..c21c1ff 100644 --- a/clang/test/Headers/arm-cmse-header.c +++ b/clang/test/Headers/arm-cmse-header.c @@ -2,6 +2,8 @@ // RUN: %clang_cc1 -triple thumbv8m.base-eabi -fsyntax-only -ffreestanding -x c++ %s -verify -mcmse // expected-no-diagnostics +#include "system_reserved_names.h" + #include <arm_cmse.h> typedef void (*callback_t)(void); diff --git a/clang/test/Headers/arm-fp16-header.c b/clang/test/Headers/arm-fp16-header.c index b1a87fa..e472654 100644 --- a/clang/test/Headers/arm-fp16-header.c +++ b/clang/test/Headers/arm-fp16-header.c @@ -18,4 +18,6 @@ // REQUIRES: aarch64-registered-target || arm-registered-target +#include "system_reserved_names.h" + #include <arm_fp16.h> diff --git a/clang/test/Headers/arm-neon-header.c b/clang/test/Headers/arm-neon-header.c index 89bd5aa..43b1b35 100644 --- a/clang/test/Headers/arm-neon-header.c +++ b/clang/test/Headers/arm-neon-header.c @@ -26,4 +26,6 @@ // REQUIRES: aarch64-registered-target || arm-registered-target +#include "system_reserved_names.h" + #include <arm_neon.h> diff --git a/clang/test/Headers/system_reserved_names.h b/clang/test/Headers/system_reserved_names.h new file mode 100644 index 0000000..1a53f4f --- /dev/null +++ b/clang/test/Headers/system_reserved_names.h @@ -0,0 +1,165 @@ +// Test that headers are not tripped up by the surrounding code defining various +// alphabetic macros. Also ensure that we don't swallow the definition of user +// provided macros (in other words, ensure that we push/pop correctly everywhere). +// +// The contents of this header is a lightly trimmed version of +// libcxx/test/libcxx/system_reserved_names.gen.py; additions to that testcase +// can be synced into this header as well. + +#define SYSTEM_RESERVED_NAME This name should not be used in Clang headers + +// libc++ does not use single-letter names as a matter of principle. +// But Windows' own <wchar.h>, <math.h>, and <exception> use many of these +// (at least C,E,F,I,M,N,P,S,X,Y,Z) as uglified function parameter names, +// so don't define these on Windows. +// +#ifndef _WIN32 +#define _A SYSTEM_RESERVED_NAME +#define _B SYSTEM_RESERVED_NAME +#define _C SYSTEM_RESERVED_NAME +#define _D SYSTEM_RESERVED_NAME +#define _E SYSTEM_RESERVED_NAME +#define _F SYSTEM_RESERVED_NAME +#define _G SYSTEM_RESERVED_NAME +#define _H SYSTEM_RESERVED_NAME +#define _I SYSTEM_RESERVED_NAME +#define _J SYSTEM_RESERVED_NAME +#define _K SYSTEM_RESERVED_NAME +#define _L SYSTEM_RESERVED_NAME +#define _M SYSTEM_RESERVED_NAME +#define _N SYSTEM_RESERVED_NAME +#define _O SYSTEM_RESERVED_NAME +#define _P SYSTEM_RESERVED_NAME +#define _Q SYSTEM_RESERVED_NAME +#define _R SYSTEM_RESERVED_NAME +#define _S SYSTEM_RESERVED_NAME +#define _T SYSTEM_RESERVED_NAME +#define _U SYSTEM_RESERVED_NAME +#define _V SYSTEM_RESERVED_NAME +#define _W SYSTEM_RESERVED_NAME +#define _X SYSTEM_RESERVED_NAME +#define _Y SYSTEM_RESERVED_NAME +#define _Z SYSTEM_RESERVED_NAME +#endif + +// FreeBSD's <sys/types.h> uses _M +// +#ifdef __FreeBSD__ +# undef _M +#endif + +// Test that libc++ doesn't use names that collide with FreeBSD system macros. 
+// newlib and picolibc also define these macros +#if !defined(__FreeBSD__) && !defined(_NEWLIB_VERSION) +# define __null_sentinel SYSTEM_RESERVED_NAME +# define __generic SYSTEM_RESERVED_NAME +#endif + +// tchar.h defines these macros on Windows +#ifndef _WIN32 +# define _UI SYSTEM_RESERVED_NAME +# define _PUC SYSTEM_RESERVED_NAME +# define _CPUC SYSTEM_RESERVED_NAME +# define _PC SYSTEM_RESERVED_NAME +# define _CRPC SYSTEM_RESERVED_NAME +# define _CPC SYSTEM_RESERVED_NAME +#endif + +// yvals.h on MINGW defines this macro +#ifndef _WIN32 +# define _C2 SYSTEM_RESERVED_NAME +#endif + +// Test that libc++ doesn't use names that collide with Win32 API macros. +// Obviously we can only define these on non-Windows platforms. +#ifndef _WIN32 +# define __allocator SYSTEM_RESERVED_NAME +# define __bound SYSTEM_RESERVED_NAME +# define __deallocate SYSTEM_RESERVED_NAME +# define __deref SYSTEM_RESERVED_NAME +# define __format_string SYSTEM_RESERVED_NAME +# define __full SYSTEM_RESERVED_NAME +# define __in SYSTEM_RESERVED_NAME +# define __inout SYSTEM_RESERVED_NAME +# define __nz SYSTEM_RESERVED_NAME +# define __out SYSTEM_RESERVED_NAME +# define __part SYSTEM_RESERVED_NAME +# define __post SYSTEM_RESERVED_NAME +# define __pre SYSTEM_RESERVED_NAME +#endif + +// Newlib & picolibc use __input as a parameter name of a64l & l64a +#ifndef _NEWLIB_VERSION +# define __input SYSTEM_RESERVED_NAME +#endif +#define __output SYSTEM_RESERVED_NAME + +#define __acquire SYSTEM_RESERVED_NAME +#define __release SYSTEM_RESERVED_NAME + +// Android and FreeBSD use this for __attribute__((__unused__)) +#if !defined(__FreeBSD__) && !defined(__ANDROID__) +#define __unused SYSTEM_RESERVED_NAME +#endif + +// These names are not reserved, so the user can macro-define them. +// These are intended to find improperly _Uglified template parameters. +#define A SYSTEM_RESERVED_NAME +#define Arg SYSTEM_RESERVED_NAME +#define Args SYSTEM_RESERVED_NAME +#define As SYSTEM_RESERVED_NAME +#define B SYSTEM_RESERVED_NAME +#define Bs SYSTEM_RESERVED_NAME +#define C SYSTEM_RESERVED_NAME +#define Cp SYSTEM_RESERVED_NAME +#define Cs SYSTEM_RESERVED_NAME +// Windows setjmp.h contains a struct member named 'D' on ARM/AArch64. 
+#ifndef _WIN32 +# define D SYSTEM_RESERVED_NAME +#endif +#define Dp SYSTEM_RESERVED_NAME +#define Ds SYSTEM_RESERVED_NAME +#define E SYSTEM_RESERVED_NAME +#define Ep SYSTEM_RESERVED_NAME +#define Es SYSTEM_RESERVED_NAME +#define N SYSTEM_RESERVED_NAME +#define Np SYSTEM_RESERVED_NAME +#define Ns SYSTEM_RESERVED_NAME +#define R SYSTEM_RESERVED_NAME +#define Rp SYSTEM_RESERVED_NAME +#define Rs SYSTEM_RESERVED_NAME +#define T SYSTEM_RESERVED_NAME +#define Tp SYSTEM_RESERVED_NAME +#define Ts SYSTEM_RESERVED_NAME +#define Type SYSTEM_RESERVED_NAME +#define Types SYSTEM_RESERVED_NAME +#define U SYSTEM_RESERVED_NAME +#define Up SYSTEM_RESERVED_NAME +#define Us SYSTEM_RESERVED_NAME +#define V SYSTEM_RESERVED_NAME +#define Vp SYSTEM_RESERVED_NAME +#define Vs SYSTEM_RESERVED_NAME +#define X SYSTEM_RESERVED_NAME +#define Xp SYSTEM_RESERVED_NAME +#define Xs SYSTEM_RESERVED_NAME + +// The classic Windows min/max macros +#define min SYSTEM_RESERVED_NAME +#define max SYSTEM_RESERVED_NAME + +// Test to make sure curses has no conflicting macros with the standard library +#define move SYSTEM_RESERVED_NAME +#define erase SYSTEM_RESERVED_NAME +#define refresh SYSTEM_RESERVED_NAME + +// Dinkumware libc ctype.h uses these definitions +#define _XA SYSTEM_RESERVED_NAME +#define _XS SYSTEM_RESERVED_NAME +#define _BB SYSTEM_RESERVED_NAME +#define _CN SYSTEM_RESERVED_NAME +#define _DI SYSTEM_RESERVED_NAME +#define _LO SYSTEM_RESERVED_NAME +#define _PU SYSTEM_RESERVED_NAME +#define _SP SYSTEM_RESERVED_NAME +#define _UP SYSTEM_RESERVED_NAME +#define _XD SYSTEM_RESERVED_NAME diff --git a/clang/test/Headers/x86-intrinsics-headers-clean.cpp b/clang/test/Headers/x86-intrinsics-headers-clean.cpp index a19207f..0a04bce 100644 --- a/clang/test/Headers/x86-intrinsics-headers-clean.cpp +++ b/clang/test/Headers/x86-intrinsics-headers-clean.cpp @@ -10,4 +10,6 @@ // expected-no-diagnostics +#include "system_reserved_names.h" + #include <x86intrin.h> diff --git a/clang/test/Headers/x86-intrinsics-headers.c b/clang/test/Headers/x86-intrinsics-headers.c index dc06cbd..89a7d4d 100644 --- a/clang/test/Headers/x86-intrinsics-headers.c +++ b/clang/test/Headers/x86-intrinsics-headers.c @@ -5,6 +5,8 @@ // XFAIL: target=arm64ec-pc-windows-msvc // These intrinsics are not yet implemented for Arm64EC. 
+#include "system_reserved_names.h" + #if defined(i386) || defined(__x86_64__) #ifdef __SSE4_2__ diff --git a/clang/test/Modules/pr133057.cpp b/clang/test/Modules/pr133057.cpp new file mode 100644 index 0000000..b273fc3 --- /dev/null +++ b/clang/test/Modules/pr133057.cpp @@ -0,0 +1,143 @@ +// RUN: rm -rf %t +// RUN: mkdir -p %t +// RUN: split-file %s %t +// +// RUN: %clang_cc1 -xc++ -std=c++20 -emit-module -fmodule-name=hf -fno-cxx-modules -fmodules -fno-implicit-modules %t/CMO.cppmap -o %t/WI9.pcm +// RUN: %clang_cc1 -xc++ -std=c++20 -emit-module -fmodule-name=g -fno-cxx-modules -fmodules -fno-implicit-modules -fmodule-file=%t/WI9.pcm %t/E6H.cppmap -o %t/4BK.pcm +// RUN: %clang_cc1 -xc++ -std=c++20 -emit-module -fmodule-name=r -fno-cxx-modules -fmodules -fno-implicit-modules -fmodule-file=%t/WI9.pcm %t/HMT.cppmap -o %t/LUM.pcm +// RUN: %clang_cc1 -xc++ -std=c++20 -emit-module -fmodule-name=q -fno-cxx-modules -fmodules -fno-implicit-modules -fmodule-file=%t/LUM.pcm -fmodule-file=%t/4BK.pcm %t/JOV.cppmap -o %t/9VX.pcm +// RUN: %clang_cc1 -xc++ -std=c++20 -verify -fsyntax-only -fno-cxx-modules -fmodules -fno-implicit-modules -fmodule-file=%t/9VX.pcm %t/XFD.cc + +//--- 2OT.h +#include "LQ1.h" + +namespace ciy { +namespace xqk { +template <typename> +class vum { + public: + using sc = std::C::wmd; + friend bool operator==(vum, vum); +}; +template <typename> +class me { + public: + using vbh = vum<me>; + using sc = std::C::vy<vbh>::sc; + template <typename db> + operator db() { return {}; } +}; +} // namespace xqk +template <typename vus> +xqk::me<vus> uvo(std::C::wmd, vus); +} // namespace ciy + +class ua { + std::C::wmd kij() { + ciy::uvo(kij(), '-'); + return {}; + } +}; + +//--- 9KF.h +#include "LQ1.h" +#include "2OT.h" +namespace { +void al(std::C::wmd lou) { std::C::jv<std::C::wmd> yt = ciy::uvo(lou, '/'); } +} // namespace + +//--- CMO.cppmap +module "hf" { +header "LQ1.h" +} + + +//--- E6H.cppmap +module "g" { +export * +header "2OT.h" +} + + +//--- HMT.cppmap +module "r" { +header "2OT.h" +} + + +//--- JOV.cppmap +module "q" { +header "9KF.h" +} + + +//--- LQ1.h +namespace std { +namespace C { +template <class zd> +struct vy : zd {}; +template <class ub> +struct vy<ub*> { + typedef ub jz; +}; +struct wmd {}; +template <class uo, class zt> +void sk(uo k, zt gf) { + (void)(k != gf); +} +template <class uo> +class fm { + public: + fm(uo); +}; +template <class kj, class kju> +bool operator==(kj, kju); +template <class epn> +void afm(epn) { + using yp = vy<epn>; + if (__is_trivially_copyable(yp)) { + sk(fm(epn()), nullptr); + } +} +template <class ub> +class jv { + public: + constexpr void gq(); + ub *nef; +}; +template <class ub> +constexpr void jv<ub>::gq() { + afm(nef); +} +} // namespace C +} // namespace std +namespace ciy { +} // namespace ciy + +//--- XFD.cc +// expected-no-diagnostics +#include "LQ1.h" +#include "2OT.h" +class wiy { + public: + std::C::wmd eyb(); +}; +template <typename wpa> +void i(wpa fg) { + std::C::jv<std::C::wmd> zs; + zs = ciy::uvo(fg.eyb(), '\n'); +} +namespace ciy { +namespace xqk { +struct sbv; +std::C::jv<sbv> ns() { + std::C::jv<sbv> ubs; + ubs.gq(); + return ubs; +} +} // namespace xqk +} // namespace ciy +void s() { + wiy fg; + i(fg); +} diff --git a/clang/test/SemaCUDA/vararg.cu b/clang/test/SemaCUDA/vararg.cu index 0238f42..62693e1 100644 --- a/clang/test/SemaCUDA/vararg.cu +++ b/clang/test/SemaCUDA/vararg.cu @@ -1,11 +1,9 @@ // REQUIRES: x86-registered-target // REQUIRES: nvptx-registered-target // RUN: %clang_cc1 -triple nvptx64-nvidia-cuda 
-fcuda-is-device -fsyntax-only \ -// RUN: -verify -DEXPECT_VA_ARG_ERR -DEXPECT_VARARG_ERR %s +// RUN: -verify -DEXPECT_VA_ARG_ERR %s // RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fcuda-is-device -fsyntax-only \ // RUN: -fcuda-allow-variadic-functions -verify -DEXPECT_VA_ARG_ERR %s -// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fsyntax-only -verify \ -// RUN: -DEXPECT_VARARG_ERR %s #include <stdarg.h> #include "Inputs/cuda.h" @@ -30,28 +28,15 @@ __device__ void baz() { #endif } -__device__ void vararg(const char* x, ...) {} -#ifdef EXPECT_VARARG_ERR -// expected-error@-2 {{CUDA device code does not support variadic functions}} -#endif +__device__ void vararg(const char* x, ...) {} // OK template <typename T> -__device__ void vararg(T t, ...) {} -#ifdef EXPECT_VARARG_ERR -// expected-error@-2 {{CUDA device code does not support variadic functions}} -#endif +__device__ void vararg(T t, ...) {} // OK extern "C" __device__ int printf(const char* fmt, ...); // OK, special case. -// Definition of printf not allowed. -extern "C" __device__ int printf(const char* fmt, ...) { return 0; } -#ifdef EXPECT_VARARG_ERR -// expected-error@-2 {{CUDA device code does not support variadic functions}} -#endif +extern "C" __device__ int printf(const char* fmt, ...) { return 0; } // OK namespace ns { -__device__ int printf(const char* fmt, ...); -#ifdef EXPECT_VARARG_ERR -// expected-error@-2 {{CUDA device code does not support variadic functions}} -#endif +__device__ int printf(const char* fmt, ...); // OK } diff --git a/cross-project-tests/debuginfo-tests/dexter-tests/lit.local.cfg b/cross-project-tests/debuginfo-tests/dexter-tests/lit.local.cfg index bace385..6b71133 100644 --- a/cross-project-tests/debuginfo-tests/dexter-tests/lit.local.cfg +++ b/cross-project-tests/debuginfo-tests/dexter-tests/lit.local.cfg @@ -1,2 +1,5 @@ if "dexter" not in config.available_features: config.unsupported = True + +# LLDB through lldb-dap causes spurious failures. +config.test_retry_attempts = 2 diff --git a/cross-project-tests/debuginfo-tests/dexter/feature_tests/lit.local.cfg b/cross-project-tests/debuginfo-tests/dexter/feature_tests/lit.local.cfg index 16b9690..3b98bf5 100644 --- a/cross-project-tests/debuginfo-tests/dexter/feature_tests/lit.local.cfg +++ b/cross-project-tests/debuginfo-tests/dexter/feature_tests/lit.local.cfg @@ -3,3 +3,6 @@ if "dexter" not in config.available_features: config.name = "DExTer feature tests" config.suffixes = [".cpp", ".c", ".test"] + +# LLDB through lldb-dap causes spurious failures. 
+config.test_retry_attempts = 2 diff --git a/libc/config/linux/aarch64/headers.txt b/libc/config/linux/aarch64/headers.txt index 6d3bc91..0573851 100644 --- a/libc/config/linux/aarch64/headers.txt +++ b/libc/config/linux/aarch64/headers.txt @@ -18,6 +18,7 @@ set(TARGET_PUBLIC_HEADERS libc.include.locale libc.include.malloc libc.include.math + libc.include.netinet_in libc.include.poll libc.include.pthread libc.include.sched diff --git a/libc/config/linux/riscv/headers.txt b/libc/config/linux/riscv/headers.txt index 6d3bc91..0573851 100644 --- a/libc/config/linux/riscv/headers.txt +++ b/libc/config/linux/riscv/headers.txt @@ -18,6 +18,7 @@ set(TARGET_PUBLIC_HEADERS libc.include.locale libc.include.malloc libc.include.math + libc.include.netinet_in libc.include.poll libc.include.pthread libc.include.sched diff --git a/libc/config/linux/x86_64/headers.txt b/libc/config/linux/x86_64/headers.txt index 6d3bc91..0573851 100644 --- a/libc/config/linux/x86_64/headers.txt +++ b/libc/config/linux/x86_64/headers.txt @@ -18,6 +18,7 @@ set(TARGET_PUBLIC_HEADERS libc.include.locale libc.include.malloc libc.include.math + libc.include.netinet_in libc.include.poll libc.include.pthread libc.include.sched diff --git a/libc/include/CMakeLists.txt b/libc/include/CMakeLists.txt index 8fd37b0..afa90e6 100644 --- a/libc/include/CMakeLists.txt +++ b/libc/include/CMakeLists.txt @@ -191,6 +191,17 @@ add_header_macro( .inttypes ) +file(MAKE_DIRECTORY ${LIBC_INCLUDE_DIR}/netinet) + +add_header_macro( + netinet_in + ../libc/include/netinet/in.yaml + netinet/in.h + DEPENDS + .llvm_libc_common_h + .llvm-libc-macros.netinet_in_macros +) + add_header_macro( assert ../libc/include/assert.yaml diff --git a/libc/include/llvm-libc-macros/CMakeLists.txt b/libc/include/llvm-libc-macros/CMakeLists.txt index f715403..76c03d9 100644 --- a/libc/include/llvm-libc-macros/CMakeLists.txt +++ b/libc/include/llvm-libc-macros/CMakeLists.txt @@ -153,6 +153,12 @@ add_macro_header( ) add_macro_header( + netinet_in_macros + HDR + netinet-in-macros.h +) + +add_macro_header( offsetof_macro HDR offsetof-macro.h diff --git a/libc/include/llvm-libc-macros/netinet-in-macros.h b/libc/include/llvm-libc-macros/netinet-in-macros.h new file mode 100644 index 0000000..c05e5e2 --- /dev/null +++ b/libc/include/llvm-libc-macros/netinet-in-macros.h @@ -0,0 +1,19 @@ +//===-- Definition of macros from netinet/in.h ----------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_MACROS_NETINET_IN_MACROS_H +#define LLVM_LIBC_MACROS_NETINET_IN_MACROS_H + +#define IPPROTO_IP 0 +#define IPPROTO_ICMP 1 +#define IPPROTO_TCP 6 +#define IPPROTO_UDP 17 +#define IPPROTO_IPV6 41 +#define IPPROTO_RAW 255 + +#endif // LLVM_LIBC_MACROS_NETINET_IN_MACROS_H diff --git a/libc/include/netinet/in.h.def b/libc/include/netinet/in.h.def new file mode 100644 index 0000000..d9a6a6e --- /dev/null +++ b/libc/include/netinet/in.h.def @@ -0,0 +1,18 @@ +//===-- C standard library header in.h ------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_NETINET_IN_H +#define LLVM_LIBC_NETINET_IN_H + +#include "__llvm-libc-common.h" + +#include "../llvm-libc-macros/netinet-in-macros.h" + +%%public_api() + +#endif // LLVM_LIBC_NETINET_IN_H diff --git a/libc/include/netinet/in.yaml b/libc/include/netinet/in.yaml new file mode 100644 index 0000000..fbd798d --- /dev/null +++ b/libc/include/netinet/in.yaml @@ -0,0 +1,7 @@ +header: netinet/in.h +header_template: in.h.def +macros: [] +types: [] +enums: [] +objects: [] +functions: [] diff --git a/libc/test/include/CMakeLists.txt b/libc/test/include/CMakeLists.txt index 11e4c3a..3ac5615 100644 --- a/libc/test/include/CMakeLists.txt +++ b/libc/test/include/CMakeLists.txt @@ -208,6 +208,16 @@ add_libc_test( ) add_libc_test( + netinet_in_test + SUITE + libc_include_tests + SRCS + netinet_in_test.cpp + DEPENDS + libc.include.llvm-libc-macros.netinet_in_macros +) + +add_libc_test( signbit_test SUITE libc_include_tests diff --git a/libc/test/include/netinet_in_test.cpp b/libc/test/include/netinet_in_test.cpp new file mode 100644 index 0000000..a6c47a7 --- /dev/null +++ b/libc/test/include/netinet_in_test.cpp @@ -0,0 +1,19 @@ +//===-- Unittests for netinet/in macro ------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "include/llvm-libc-macros/netinet-in-macros.h" +#include "test/UnitTest/Test.h" + +TEST(LlvmLibcNetinetInTest, IPPROTOMacro) { + EXPECT_EQ(IPPROTO_IP, 0); + EXPECT_EQ(IPPROTO_ICMP, 1); + EXPECT_EQ(IPPROTO_TCP, 6); + EXPECT_EQ(IPPROTO_UDP, 17); + EXPECT_EQ(IPPROTO_IPV6, 41); + EXPECT_EQ(IPPROTO_RAW, 255); +} diff --git a/libclc/CMakeLists.txt b/libclc/CMakeLists.txt index 7960f34..ba0fc4b 100644 --- a/libclc/CMakeLists.txt +++ b/libclc/CMakeLists.txt @@ -225,7 +225,7 @@ set( tahiti_aliases pitcairn verde oland hainan bonaire kabini kaveri hawaii gfx1030 gfx1031 gfx1032 gfx1033 gfx1034 gfx1035 gfx1036 gfx1100 gfx1101 gfx1102 gfx1103 gfx1150 gfx1151 gfx1152 gfx1153 - gfx1200 gfx1201 + gfx1200 gfx1201 gfx1250 gfx1251 ) # pkg-config file diff --git a/lldb/unittests/Target/CMakeLists.txt b/lldb/unittests/Target/CMakeLists.txt index 0c79675..83eec3b 100644 --- a/lldb/unittests/Target/CMakeLists.txt +++ b/lldb/unittests/Target/CMakeLists.txt @@ -2,7 +2,7 @@ add_lldb_unittest(TargetTests ABITest.cpp DynamicRegisterInfoTest.cpp ExecutionContextTest.cpp - Language.cpp + LanguageTest.cpp LocateModuleCallbackTest.cpp MemoryRegionInfoTest.cpp MemoryTest.cpp diff --git a/lldb/unittests/Target/Language.cpp b/lldb/unittests/Target/LanguageTest.cpp index a00fda78..a00fda78 100644 --- a/lldb/unittests/Target/Language.cpp +++ b/lldb/unittests/Target/LanguageTest.cpp diff --git a/llvm/docs/PDB/HashTable.rst b/llvm/docs/PDB/HashTable.rst index 581ec59..7420510 100644 --- a/llvm/docs/PDB/HashTable.rst +++ b/llvm/docs/PDB/HashTable.rst @@ -17,8 +17,8 @@ a consumer to read a list of values and reconstruct the hash table on the fly. The serialization format supports hash tables of arbitrarily large size and capacity, as well as value types and hash functions. The only supported key value type is a uint32. 
The only requirement is that the producer and consumer -agree on the hash function. As such, the hash function can is not discussed -further in this document, it is assumed that for a particular instance of a PDB +agree on the hash function. As such, the hash function is not discussed +further in this document. It is assumed that for a particular instance of a PDB file hash table, the appropriate hash function is being used. On-Disk Format diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td index 9cfab26..3af1750 100644 --- a/llvm/include/llvm/IR/IntrinsicsNVVM.td +++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td @@ -272,6 +272,10 @@ class WMMA_REGS<string Geom, string Frag, string PtxEltType, bit IsSparse = fals !eq(gft,"m16n8k16:d:f32") : !listsplat(llvm_float_ty, 4), !eq(gft,"m16n8k4:c:f32") : !listsplat(llvm_float_ty, 4), !eq(gft,"m16n8k4:d:f32") : !listsplat(llvm_float_ty, 4), + !eq(gft,"m16n8k32:c:f16") : !listsplat(llvm_v2f16_ty, 2), + !eq(gft,"m16n8k32:c:f32") : !listsplat(llvm_float_ty, 4), + !eq(gft,"m16n8k32:d:f16") : !listsplat(llvm_v2f16_ty, 2), + !eq(gft,"m16n8k32:d:f32") : !listsplat(llvm_float_ty, 4), // wmma fp16 -> fp16/fp32 @ m16n16k16/m8n32k16/m32n8k16 // All other supported geometries use the same fragment format for f32 and @@ -298,6 +302,21 @@ class WMMA_REGS<string Geom, string Frag, string PtxEltType, bit IsSparse = fals !eq(gft,"m8n8k4:c:f64") : !listsplat(llvm_double_ty, 2), !eq(gft,"m8n8k4:d:f64") : !listsplat(llvm_double_ty, 2), + !eq(gft,"m16n8k4:a:f64") : !listsplat(llvm_double_ty, 2), + !eq(gft,"m16n8k4:b:f64") : [llvm_double_ty], + !eq(gft,"m16n8k4:c:f64") : !listsplat(llvm_double_ty, 4), + !eq(gft,"m16n8k4:d:f64") : !listsplat(llvm_double_ty, 4), + + !eq(gft,"m16n8k8:a:f64") : !listsplat(llvm_double_ty, 4), + !eq(gft,"m16n8k8:b:f64") : !listsplat(llvm_double_ty, 2), + !eq(gft,"m16n8k8:c:f64") : !listsplat(llvm_double_ty, 4), + !eq(gft,"m16n8k8:d:f64") : !listsplat(llvm_double_ty, 4), + + !eq(gft,"m16n8k16:a:f64") : !listsplat(llvm_double_ty, 8), + !eq(gft,"m16n8k16:b:f64") : !listsplat(llvm_double_ty, 4), + !eq(gft,"m16n8k16:c:f64") : !listsplat(llvm_double_ty, 4), + !eq(gft,"m16n8k16:d:f64") : !listsplat(llvm_double_ty, 4), + // wmma bf16 -> s32 @ m16n16k16/m8n32k16/m32n8k16 !eq(gft,"m16n16k16:a:bf16") : !listsplat(llvm_i32_ty, 4), !eq(gft,"m16n16k16:b:bf16") : !listsplat(llvm_i32_ty, 4), @@ -378,6 +397,26 @@ class WMMA_REGS<string Geom, string Frag, string PtxEltType, bit IsSparse = fals !eq(gft,"m16n8k64:c:s32") : !listsplat(llvm_i32_ty, 4), !eq(gft,"m16n8k64:d:s32") : !listsplat(llvm_i32_ty, 4), + // mma e4m3/e5m2 -> f16/f32 @ m16n8k16 + !eq(gft,"m16n8k16:a:e4m3") : !listsplat(llvm_i32_ty, 2), + !eq(gft,"m16n8k16:a:e5m2") : !listsplat(llvm_i32_ty, 2), + !eq(gft,"m16n8k16:b:e4m3") : [llvm_i32_ty], + !eq(gft,"m16n8k16:b:e5m2") : [llvm_i32_ty], + // mma e4m3/e5m2/e3m2/e2m3/e2m1 -> f32 @ m16n8k32 + !eq(gft,"m16n8k32:a:e4m3") : !listsplat(llvm_i32_ty, 4), + !eq(gft,"m16n8k32:a:e5m2") : !listsplat(llvm_i32_ty, 4), + !eq(gft,"m16n8k32:a:e3m2") : !listsplat(llvm_i32_ty, 4), + !eq(gft,"m16n8k32:a:e2m3") : !listsplat(llvm_i32_ty, 4), + !eq(gft,"m16n8k32:a:e2m1") : !listsplat(llvm_i32_ty, 4), + !eq(gft,"m16n8k32:b:e4m3") : !listsplat(llvm_i32_ty, 2), + !eq(gft,"m16n8k32:b:e5m2") : !listsplat(llvm_i32_ty, 2), + !eq(gft,"m16n8k32:b:e3m2") : !listsplat(llvm_i32_ty, 2), + !eq(gft,"m16n8k32:b:e2m3") : !listsplat(llvm_i32_ty, 2), + !eq(gft,"m16n8k32:b:e2m1") : !listsplat(llvm_i32_ty, 2), + // mma e2m1 -> f32 @m16n8k64 + 
!eq(gft,"m16n8k64:a:e2m1") : !listsplat(llvm_i32_ty, 4), + !eq(gft,"m16n8k64:b:e2m1") : !listsplat(llvm_i32_ty, 2), + // wmma/mma b1 -> s32 @ m8n8k128(b1) !eq(gft,"m8n8k128:a:b1") : [llvm_i32_ty], !eq(gft,"m8n8k128:b:b1") : [llvm_i32_ty], @@ -468,7 +507,7 @@ class WMMA_NAME<string ALayout, string BLayout, int Satfinite, string Rnd, strin # !if(Satfinite, "_satfinite", ""); } -class MMA_NAME<string ALayout, string BLayout, int Satfinite, string b1op, +class MMA_NAME<string ALayout, string BLayout, int Satfinite, string b1op, string Kind, WMMA_REGS A, WMMA_REGS B, WMMA_REGS C, WMMA_REGS D> { string signature = MMA_SIGNATURE<A, B, C, D>.ret; string record = "int_nvvm_mma" @@ -476,6 +515,7 @@ class MMA_NAME<string ALayout, string BLayout, int Satfinite, string b1op, # "_" # A.geom # "_" # ALayout # "_" # BLayout + # !if(!ne(Kind, ""), !strconcat("_", !subst("::", "_", Kind)), "") # !if(Satfinite, "_satfinite", "") # signature; } @@ -601,7 +641,7 @@ class NVVM_MMA_OPS { ["m16n8k16", "m16n8k8"], ["bf16"], [], ["f32"], []>.ret; list<list<WMMA_REGS>> f64_mma_ops = MMA_OPS< - ["m8n8k4"], + ["m8n8k4", "m16n8k4", "m16n8k8", "m16n8k16"], ["f64"], [], ["f64"], []>.ret; list<list<WMMA_REGS>> fp_mma_ops = MMA_OPS< ["m8n8k4", "m16n8k8", "m16n8k16"], @@ -609,6 +649,18 @@ class NVVM_MMA_OPS { list<list<WMMA_REGS>> int_mma_ops = MMA_OPS< ["m8n8k16", "m16n8k16", "m16n8k32"], ["s8", "u8"], ["s8", "u8"], ["s32"], []>.ret; + // m16n8k32 fp8 variants are intersected with f8f6f4 variants + // and processed there + list<list<WMMA_REGS>> fp8_mma_ops = MMA_OPS< + ["m16n8k16"], + ["e4m3", "e5m2"], ["e4m3", "e5m2"], + ["f16", "f32"], ["f16", "f32"]>.ret; + // it also contains e4m3/e5m2 from fp8 variants + list<list<WMMA_REGS>> f8f6f4_mma_ops = MMA_OPS< + ["m16n8k32"], + ["e4m3", "e5m2", "e3m2", "e2m3", "e2m1"], + ["e4m3", "e5m2", "e3m2", "e2m3", "e2m1"], + ["f16", "f32"], ["f16", "f32"]>.ret; list<list<WMMA_REGS>> subint_mma_ops = MMA_OPS< ["m8n8k32", "m16n8k32", "m16n8k64"], ["s4", "u4"], ["s4", "u4"], ["s32"], []>.ret; @@ -617,7 +669,8 @@ class NVVM_MMA_OPS { ["b1"], [], ["s32"], []>.ret; list<list<WMMA_REGS>> all_mma_ops = !listconcat( tf32_mma_ops, bf16_mma_ops, f64_mma_ops, - fp_mma_ops, int_mma_ops, subint_mma_ops, bit_mma_ops); + fp_mma_ops, fp8_mma_ops, f8f6f4_mma_ops, + int_mma_ops, subint_mma_ops, bit_mma_ops); list<list<WMMA_REGS>> bf16_mma_sp_ops = MMA_OPS< ["m16n8k16", "m16n8k32"], @@ -770,7 +823,8 @@ class NVVM_MMA_B1OPS<list<WMMA_REGS> frags> { // if NVVM_MMA_SUPPORTED<...>.ret then // def : FOO<>; // The record will only be defined for supported ops. // -class NVVM_MMA_SUPPORTED<list<WMMA_REGS> frags, string layout_a, string layout_b, int satf> { +class NVVM_MMA_SUPPORTED<list<WMMA_REGS> frags, string layout_a, string layout_b, + string kind, int satf> { // MMA ops check both layouts. string layout = layout_a # ":" # layout_b; string a_type = frags[0].ptx_elt_type; @@ -805,10 +859,31 @@ class NVVM_MMA_SUPPORTED<list<WMMA_REGS> frags, string layout_a, string layout_b !or(!ne(a_type, b_type), !ne(c_type, d_type))): false, - // m16n8k8 requires C and D to be the same type. 
- !and(!eq(geom, "m16n8k8"), + // m16n8k16/m16n8k32 requires C and D to be the same type + !and(!or(!eq(geom, "m16n8k16"), + !eq(geom, "m16n8k32")), !ne(c_type, d_type)): false, + // Limit kind to valid types and geometries + !and(!ne(kind, ""), + !or(!ne(geom, "m16n8k32"), + !and(!ne(a_type, "e4m3"), + !ne(a_type, "e5m2"), + !ne(a_type, "e3m2"), + !ne(a_type, "e2m3"), + !ne(a_type, "e2m1")))): false, + + // Limit m16n8k16/m16n8k32 with no kind to valid types + !and(!eq(kind, ""), + !or(!eq(geom, "m16n8k16"), + !eq(geom, "m16n8k32")), + !or(!eq(a_type, "e3m2"), + !eq(a_type, "e2m3"), + !eq(a_type, "e2m1"), + !eq(b_type, "e3m2"), + !eq(b_type, "e2m3"), + !eq(b_type, "e2m1"))): false, + // All other are OK. true: true ); @@ -882,9 +957,10 @@ class NVVM_MMA_SP_SUPPORTED<list<WMMA_REGS> frags, string metadata, !eq(a_type, "tf32")), !ne(a_type, b_type)): false, - // m16n8k16 and m16n8k32 requires C and D to be the same type. + // m16n8k16, m16n8k32 and m16n8k64 requires C and D to be the same type. !and(!or(!eq(geom, "m16n8k16"), - !eq(geom, "m16n8k32")), + !eq(geom, "m16n8k32"), + !eq(geom, "m16n8k64")), !ne(c_type, d_type)): false, !and(!eq(kind, ""), @@ -1493,6 +1569,18 @@ let TargetPrefix = "nvvm" in { } } + // RS rounding mode (Stochastic Rounding) conversions for f16x2, bf16x2 types + // The last i32 operand provides the random bits for the conversion + foreach relu = ["", "_relu"] in { + foreach satfinite = ["", "_satfinite"] in { + def int_nvvm_ff2f16x2_rs # relu # satfinite : NVVMBuiltin, + PureIntrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty]>; + + def int_nvvm_ff2bf16x2_rs # relu # satfinite : NVVMBuiltin, + PureIntrinsic<[llvm_v2bf16_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty]>; + } + } + foreach satfinite = ["", "_satfinite"] in { def int_nvvm_f2tf32_rna # satfinite : NVVMBuiltin, PureIntrinsic<[llvm_i32_ty], [llvm_float_ty]>; @@ -1515,6 +1603,15 @@ let TargetPrefix = "nvvm" in { PureIntrinsic<[llvm_v2f16_ty], [llvm_i16_ty]>; } } + + // RS rounding mode (Stochastic Rounding) conversions for f8x4 types + // The last i32 operand provides the random bits for the conversion + foreach type = ["e4m3x4", "e5m2x4"] in { + foreach relu = ["", "_relu"] in { + def int_nvvm_f32x4_to_ # type # _rs # relu # _satfinite : NVVMBuiltin, + PureIntrinsic<[llvm_v4i8_ty], [llvm_v4f32_ty, llvm_i32_ty]>; + } + } // FP4 conversions. foreach relu = ["", "_relu"] in { @@ -1524,6 +1621,13 @@ let TargetPrefix = "nvvm" in { def int_nvvm_e2m1x2_to_f16x2_rn # relu : NVVMBuiltin, PureIntrinsic<[llvm_v2f16_ty], [llvm_i16_ty]>; } + + // RS rounding mode (Stochastic Rounding) conversions for f4x4 type + // The last i32 operand provides the random bits for the conversion + foreach relu = ["", "_relu"] in { + def int_nvvm_f32x4_to_e2m1x4_rs # relu # _satfinite : NVVMBuiltin, + PureIntrinsic<[llvm_i16_ty], [llvm_v4f32_ty, llvm_i32_ty]>; + } // FP6 conversions. foreach type = ["e2m3x2", "e3m2x2"] in { @@ -1535,6 +1639,15 @@ let TargetPrefix = "nvvm" in { PureIntrinsic<[llvm_v2f16_ty], [llvm_i16_ty]>; } } + + // RS rounding mode (Stochastic Rounding) conversions for f6x4 types + // The last i32 operand provides the random bits for the conversion + foreach type = ["e2m3x4", "e3m2x4"] in { + foreach relu = ["", "_relu"] in { + def int_nvvm_f32x4_to_ # type # _rs # relu # _satfinite : NVVMBuiltin, + PureIntrinsic<[llvm_v4i8_ty], [llvm_v4f32_ty, llvm_i32_ty]>; + } + } // UE8M0x2 conversions. 
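As an illustrative sketch only (not part of the patch): the RS-rounding intrinsics declared above are tagged NVVMBuiltin, so matching __nvvm_* builtins are expected on the clang side. The builtin names, vector typedefs and target requirements below (sm_100a/sm_103a, PTX 8.7, compiled as CUDA) are assumptions inferred from those declarations, not confirmed API.

// Hypothetical CUDA/C++ usage of the RS (stochastic rounding) conversions.
typedef __bf16 bf16x2_t __attribute__((ext_vector_type(2)));
typedef float  f32x4_t  __attribute__((ext_vector_type(4)));
typedef char   i8x4_t   __attribute__((ext_vector_type(4)));

__device__ bf16x2_t pack_bf16x2_rs(float a, float b, unsigned rbits) {
  // The trailing i32 operand supplies the random bits consumed by the .rs mode.
  return __nvvm_ff2bf16x2_rs(a, b, rbits);
}

__device__ i8x4_t pack_e4m3x4_rs(f32x4_t v, unsigned rbits) {
  // satfinite clamps out-of-range results to the largest finite e4m3 value.
  return __nvvm_f32x4_to_e4m3x4_rs_satfinite(v, rbits);
}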
foreach rmode = ["_rz", "_rp"] in { @@ -2215,10 +2328,12 @@ foreach layout_a = ["row", "col"] in { foreach satf = [0, 1] in { foreach op = NVVM_MMA_OPS.all_mma_ops in { foreach b1op = NVVM_MMA_B1OPS<op>.ret in { - if NVVM_MMA_SUPPORTED<op, layout_a, layout_b, satf>.ret then { - def MMA_NAME<layout_a, layout_b, satf, b1op, op[0], op[1], op[2], op[3]>.record - : NVVM_MMA<op[0], op[1], op[2], op[3]>; - } + foreach kind = ["", "kind::f8f6f4"] in { + if NVVM_MMA_SUPPORTED<op, layout_a, layout_b, kind, satf>.ret then { + def MMA_NAME<layout_a, layout_b, satf, b1op, kind, op[0], op[1], op[2], op[3]>.record + : NVVM_MMA<op[0], op[1], op[2], op[3]>; + } + } // kind } // b1op } // op } // satf diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp index 4357264d..b98f797 100644 --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -345,12 +345,6 @@ static unsigned getStackHazardSize(const MachineFunction &MF) { return MF.getSubtarget<AArch64Subtarget>().getStreamingHazardSize(); } -/// Returns true if PPRs are spilled as ZPRs. -static bool arePPRsSpilledAsZPR(const MachineFunction &MF) { - return MF.getSubtarget().getRegisterInfo()->getSpillSize( - AArch64::PPRRegClass) == 16; -} - StackOffset AArch64FrameLowering::getZPRStackSize(const MachineFunction &MF) const { const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); @@ -1966,8 +1960,7 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters( StrOpc = RPI.isPaired() ? AArch64::ST1B_2Z_IMM : AArch64::STR_ZXI; break; case RegPairInfo::PPR: - StrOpc = - Size == 16 ? AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO : AArch64::STR_PXI; + StrOpc = AArch64::STR_PXI; break; case RegPairInfo::VG: StrOpc = AArch64::STRXui; @@ -2178,8 +2171,7 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters( LdrOpc = RPI.isPaired() ? AArch64::LD1B_2Z_IMM : AArch64::LDR_ZXI; break; case RegPairInfo::PPR: - LdrOpc = Size == 16 ? AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO - : AArch64::LDR_PXI; + LdrOpc = AArch64::LDR_PXI; break; case RegPairInfo::VG: continue; @@ -2286,9 +2278,7 @@ static std::optional<int> getLdStFrameID(const MachineInstr &MI, // Returns true if the LDST MachineInstr \p MI is a PPR access. static bool isPPRAccess(const MachineInstr &MI) { - return MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO && - MI.getOpcode() != AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO && - AArch64::PPRRegClass.contains(MI.getOperand(0).getReg()); + return AArch64::PPRRegClass.contains(MI.getOperand(0).getReg()); } // Check if a Hazard slot is needed for the current function, and if so create @@ -2390,12 +2380,6 @@ void AArch64FrameLowering::determineStackHazardSlot( return; } - if (arePPRsSpilledAsZPR(MF)) { - LLVM_DEBUG(dbgs() << "SplitSVEObjects is not supported with " - "-aarch64-enable-zpr-predicate-spills"); - return; - } - // If another calling convention is explicitly set FPRs can't be promoted to // ZPR callee-saves. if (!is_contained({CallingConv::C, CallingConv::Fast, @@ -2519,14 +2503,6 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, continue; } - // Always save P4 when PPR spills are ZPR-sized and a predicate above p8 is - // spilled. If all of p0-p3 are used as return values p4 is must be free - // to reload p8-p15. 
- if (RegInfo->getSpillSize(AArch64::PPRRegClass) == 16 && - AArch64::PPR_p8to15RegClass.contains(Reg)) { - SavedRegs.set(AArch64::P4); - } - // MachO's compact unwind format relies on all registers being stored in // pairs. // FIXME: the usual format is actually better if unwinding isn't needed. @@ -2587,7 +2563,7 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, auto SpillSize = TRI->getSpillSize(*RC); bool IsZPR = AArch64::ZPRRegClass.contains(Reg); bool IsPPR = !IsZPR && AArch64::PPRRegClass.contains(Reg); - if (IsZPR || (IsPPR && arePPRsSpilledAsZPR(MF))) + if (IsZPR) ZPRCSStackSize += SpillSize; else if (IsPPR) PPRCSStackSize += SpillSize; @@ -2961,314 +2937,8 @@ static SVEStackSizes determineSVEStackSizes(MachineFunction &MF, return SVEStack; } -/// Attempts to scavenge a register from \p ScavengeableRegs given the used -/// registers in \p UsedRegs. -static Register tryScavengeRegister(LiveRegUnits const &UsedRegs, - BitVector const &ScavengeableRegs, - Register PreferredReg) { - if (PreferredReg != AArch64::NoRegister && UsedRegs.available(PreferredReg)) - return PreferredReg; - for (auto Reg : ScavengeableRegs.set_bits()) { - if (UsedRegs.available(Reg)) - return Reg; - } - return AArch64::NoRegister; -} - -/// Propagates frame-setup/destroy flags from \p SourceMI to all instructions in -/// \p MachineInstrs. -static void propagateFrameFlags(MachineInstr &SourceMI, - ArrayRef<MachineInstr *> MachineInstrs) { - for (MachineInstr *MI : MachineInstrs) { - if (SourceMI.getFlag(MachineInstr::FrameSetup)) - MI->setFlag(MachineInstr::FrameSetup); - if (SourceMI.getFlag(MachineInstr::FrameDestroy)) - MI->setFlag(MachineInstr::FrameDestroy); - } -} - -/// RAII helper class for scavenging or spilling a register. On construction -/// attempts to find a free register of class \p RC (given \p UsedRegs and \p -/// AllocatableRegs), if no register can be found spills \p SpillCandidate to \p -/// MaybeSpillFI to free a register. The free'd register is returned via the \p -/// FreeReg output parameter. On destruction, if there is a spill, its previous -/// value is reloaded. The spilling and scavenging is only valid at the -/// insertion point \p MBBI, this class should _not_ be used in places that -/// create or manipulate basic blocks, moving the expected insertion point. 
-struct ScopedScavengeOrSpill { - ScopedScavengeOrSpill(const ScopedScavengeOrSpill &) = delete; - ScopedScavengeOrSpill(ScopedScavengeOrSpill &&) = delete; - - ScopedScavengeOrSpill(MachineFunction &MF, MachineBasicBlock &MBB, - MachineBasicBlock::iterator MBBI, - Register SpillCandidate, const TargetRegisterClass &RC, - LiveRegUnits const &UsedRegs, - BitVector const &AllocatableRegs, - std::optional<int> *MaybeSpillFI, - Register PreferredReg = AArch64::NoRegister) - : MBB(MBB), MBBI(MBBI), RC(RC), TII(static_cast<const AArch64InstrInfo &>( - *MF.getSubtarget().getInstrInfo())), - TRI(*MF.getSubtarget().getRegisterInfo()) { - FreeReg = tryScavengeRegister(UsedRegs, AllocatableRegs, PreferredReg); - if (FreeReg != AArch64::NoRegister) - return; - assert(MaybeSpillFI && "Expected emergency spill slot FI information " - "(attempted to spill in prologue/epilogue?)"); - if (!MaybeSpillFI->has_value()) { - MachineFrameInfo &MFI = MF.getFrameInfo(); - *MaybeSpillFI = MFI.CreateSpillStackObject(TRI.getSpillSize(RC), - TRI.getSpillAlign(RC)); - } - FreeReg = SpillCandidate; - SpillFI = MaybeSpillFI->value(); - TII.storeRegToStackSlot(MBB, MBBI, FreeReg, false, *SpillFI, &RC, &TRI, - Register()); - } - - bool hasSpilled() const { return SpillFI.has_value(); } - - /// Returns the free register (found from scavenging or spilling a register). - Register freeRegister() const { return FreeReg; } - - Register operator*() const { return freeRegister(); } - - ~ScopedScavengeOrSpill() { - if (hasSpilled()) - TII.loadRegFromStackSlot(MBB, MBBI, FreeReg, *SpillFI, &RC, &TRI, - Register()); - } - -private: - MachineBasicBlock &MBB; - MachineBasicBlock::iterator MBBI; - const TargetRegisterClass &RC; - const AArch64InstrInfo &TII; - const TargetRegisterInfo &TRI; - Register FreeReg = AArch64::NoRegister; - std::optional<int> SpillFI; -}; - -/// Emergency stack slots for expanding SPILL_PPR_TO_ZPR_SLOT_PSEUDO and -/// FILL_PPR_FROM_ZPR_SLOT_PSEUDO. -struct EmergencyStackSlots { - std::optional<int> ZPRSpillFI; - std::optional<int> PPRSpillFI; - std::optional<int> GPRSpillFI; -}; - -/// Registers available for scavenging (ZPR, PPR3b, GPR). -struct ScavengeableRegs { - BitVector ZPRRegs; - BitVector PPR3bRegs; - BitVector GPRRegs; -}; - -static bool isInPrologueOrEpilogue(const MachineInstr &MI) { - return MI.getFlag(MachineInstr::FrameSetup) || - MI.getFlag(MachineInstr::FrameDestroy); -} - -/// Expands: -/// ``` -/// SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p0, %stack.0, 0 -/// ``` -/// To: -/// ``` -/// $z0 = CPY_ZPzI_B $p0, 1, 0 -/// STR_ZXI $z0, $stack.0, 0 -/// ``` -/// While ensuring a ZPR ($z0 in this example) is free for the predicate ( -/// spilling if necessary). -static void expandSpillPPRToZPRSlotPseudo(MachineBasicBlock &MBB, - MachineInstr &MI, - const TargetRegisterInfo &TRI, - LiveRegUnits const &UsedRegs, - ScavengeableRegs const &SR, - EmergencyStackSlots &SpillSlots) { - MachineFunction &MF = *MBB.getParent(); - auto *TII = - static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo()); - - ScopedScavengeOrSpill ZPredReg( - MF, MBB, MI, AArch64::Z0, AArch64::ZPRRegClass, UsedRegs, SR.ZPRRegs, - isInPrologueOrEpilogue(MI) ? 
nullptr : &SpillSlots.ZPRSpillFI); - - SmallVector<MachineInstr *, 2> MachineInstrs; - const DebugLoc &DL = MI.getDebugLoc(); - MachineInstrs.push_back(BuildMI(MBB, MI, DL, TII->get(AArch64::CPY_ZPzI_B)) - .addReg(*ZPredReg, RegState::Define) - .add(MI.getOperand(0)) - .addImm(1) - .addImm(0) - .getInstr()); - MachineInstrs.push_back(BuildMI(MBB, MI, DL, TII->get(AArch64::STR_ZXI)) - .addReg(*ZPredReg) - .add(MI.getOperand(1)) - .addImm(MI.getOperand(2).getImm()) - .setMemRefs(MI.memoperands()) - .getInstr()); - propagateFrameFlags(MI, MachineInstrs); -} - -/// Expands: -/// ``` -/// $p0 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0 -/// ``` -/// To: -/// ``` -/// $z0 = LDR_ZXI %stack.0, 0 -/// $p0 = PTRUE_B 31, implicit $vg -/// $p0 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv -/// ``` -/// While ensuring a ZPR ($z0 in this example) is free for the predicate ( -/// spilling if necessary). If the status flags are in use at the point of -/// expansion they are preserved (by moving them to/from a GPR). This may cause -/// an additional spill if no GPR is free at the expansion point. -static bool expandFillPPRFromZPRSlotPseudo( - MachineBasicBlock &MBB, MachineInstr &MI, const TargetRegisterInfo &TRI, - LiveRegUnits const &UsedRegs, ScavengeableRegs const &SR, - MachineInstr *&LastPTrue, EmergencyStackSlots &SpillSlots) { - MachineFunction &MF = *MBB.getParent(); - auto *TII = - static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo()); - - ScopedScavengeOrSpill ZPredReg( - MF, MBB, MI, AArch64::Z0, AArch64::ZPRRegClass, UsedRegs, SR.ZPRRegs, - isInPrologueOrEpilogue(MI) ? nullptr : &SpillSlots.ZPRSpillFI); - - ScopedScavengeOrSpill PredReg( - MF, MBB, MI, AArch64::P0, AArch64::PPR_3bRegClass, UsedRegs, SR.PPR3bRegs, - isInPrologueOrEpilogue(MI) ? nullptr : &SpillSlots.PPRSpillFI, - /*PreferredReg=*/ - LastPTrue ? LastPTrue->getOperand(0).getReg() : AArch64::NoRegister); - - // Elide NZCV spills if we know it is not used. - bool IsNZCVUsed = !UsedRegs.available(AArch64::NZCV); - std::optional<ScopedScavengeOrSpill> NZCVSaveReg; - if (IsNZCVUsed) - NZCVSaveReg.emplace( - MF, MBB, MI, AArch64::X0, AArch64::GPR64RegClass, UsedRegs, SR.GPRRegs, - isInPrologueOrEpilogue(MI) ? nullptr : &SpillSlots.GPRSpillFI); - SmallVector<MachineInstr *, 4> MachineInstrs; - const DebugLoc &DL = MI.getDebugLoc(); - MachineInstrs.push_back(BuildMI(MBB, MI, DL, TII->get(AArch64::LDR_ZXI)) - .addReg(*ZPredReg, RegState::Define) - .add(MI.getOperand(1)) - .addImm(MI.getOperand(2).getImm()) - .setMemRefs(MI.memoperands()) - .getInstr()); - if (IsNZCVUsed) - MachineInstrs.push_back( - BuildMI(MBB, MI, DL, TII->get(AArch64::MRS)) - .addReg(NZCVSaveReg->freeRegister(), RegState::Define) - .addImm(AArch64SysReg::NZCV) - .addReg(AArch64::NZCV, RegState::Implicit) - .getInstr()); - - // Reuse previous ptrue if we know it has not been clobbered. 
- if (LastPTrue) { - assert(*PredReg == LastPTrue->getOperand(0).getReg()); - LastPTrue->moveBefore(&MI); - } else { - LastPTrue = BuildMI(MBB, MI, DL, TII->get(AArch64::PTRUE_B)) - .addReg(*PredReg, RegState::Define) - .addImm(31); - } - MachineInstrs.push_back(LastPTrue); - MachineInstrs.push_back( - BuildMI(MBB, MI, DL, TII->get(AArch64::CMPNE_PPzZI_B)) - .addReg(MI.getOperand(0).getReg(), RegState::Define) - .addReg(*PredReg) - .addReg(*ZPredReg) - .addImm(0) - .addReg(AArch64::NZCV, RegState::ImplicitDefine) - .getInstr()); - if (IsNZCVUsed) - MachineInstrs.push_back(BuildMI(MBB, MI, DL, TII->get(AArch64::MSR)) - .addImm(AArch64SysReg::NZCV) - .addReg(NZCVSaveReg->freeRegister()) - .addReg(AArch64::NZCV, RegState::ImplicitDefine) - .getInstr()); - - propagateFrameFlags(MI, MachineInstrs); - return PredReg.hasSpilled(); -} - -/// Expands all FILL_PPR_FROM_ZPR_SLOT_PSEUDO and SPILL_PPR_TO_ZPR_SLOT_PSEUDO -/// operations within the MachineBasicBlock \p MBB. -static bool expandSMEPPRToZPRSpillPseudos(MachineBasicBlock &MBB, - const TargetRegisterInfo &TRI, - ScavengeableRegs const &SR, - EmergencyStackSlots &SpillSlots) { - LiveRegUnits UsedRegs(TRI); - UsedRegs.addLiveOuts(MBB); - bool HasPPRSpills = false; - MachineInstr *LastPTrue = nullptr; - for (MachineInstr &MI : make_early_inc_range(reverse(MBB))) { - UsedRegs.stepBackward(MI); - switch (MI.getOpcode()) { - case AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO: - if (LastPTrue && - MI.definesRegister(LastPTrue->getOperand(0).getReg(), &TRI)) - LastPTrue = nullptr; - HasPPRSpills |= expandFillPPRFromZPRSlotPseudo(MBB, MI, TRI, UsedRegs, SR, - LastPTrue, SpillSlots); - MI.eraseFromParent(); - break; - case AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO: - expandSpillPPRToZPRSlotPseudo(MBB, MI, TRI, UsedRegs, SR, SpillSlots); - MI.eraseFromParent(); - [[fallthrough]]; - default: - LastPTrue = nullptr; - break; - } - } - - return HasPPRSpills; -} - void AArch64FrameLowering::processFunctionBeforeFrameFinalized( MachineFunction &MF, RegScavenger *RS) const { - - AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); - const TargetSubtargetInfo &TSI = MF.getSubtarget(); - const TargetRegisterInfo &TRI = *TSI.getRegisterInfo(); - - // If predicates spills are 16-bytes we may need to expand - // SPILL_PPR_TO_ZPR_SLOT_PSEUDO/FILL_PPR_FROM_ZPR_SLOT_PSEUDO. - if (AFI->hasStackFrame() && TRI.getSpillSize(AArch64::PPRRegClass) == 16) { - auto ComputeScavengeableRegisters = [&](unsigned RegClassID) { - BitVector Regs = TRI.getAllocatableSet(MF, TRI.getRegClass(RegClassID)); - assert(Regs.count() > 0 && "Expected scavengeable registers"); - return Regs; - }; - - ScavengeableRegs SR{}; - SR.ZPRRegs = ComputeScavengeableRegisters(AArch64::ZPRRegClassID); - // Only p0-7 are possible as the second operand of cmpne (needed for fills). - SR.PPR3bRegs = ComputeScavengeableRegisters(AArch64::PPR_3bRegClassID); - SR.GPRRegs = ComputeScavengeableRegisters(AArch64::GPR64RegClassID); - - EmergencyStackSlots SpillSlots; - for (MachineBasicBlock &MBB : MF) { - // In the case we had to spill a predicate (in the range p0-p7) to reload - // a predicate (>= p8), additional spill/fill pseudos will be created. - // These need an additional expansion pass. Note: There will only be at - // most two expansion passes, as spilling/filling a predicate in the range - // p0-p7 never requires spilling another predicate. 
- for (int Pass = 0; Pass < 2; Pass++) { - bool HasPPRSpills = - expandSMEPPRToZPRSpillPseudos(MBB, TRI, SR, SpillSlots); - assert((Pass == 0 || !HasPPRSpills) && "Did not expect PPR spills"); - if (!HasPPRSpills) - break; - } - } - } - - MachineFrameInfo &MFI = MF.getFrameInfo(); - assert(getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown && "Upwards growing stack unsupported"); @@ -3279,6 +2949,9 @@ void AArch64FrameLowering::processFunctionBeforeFrameFinalized( if (!MF.hasEHFunclets()) return; + MachineFrameInfo &MFI = MF.getFrameInfo(); + auto *AFI = MF.getInfo<AArch64FunctionInfo>(); + // Win64 C++ EH needs to allocate space for the catch objects in the fixed // object area right next to the UnwindHelp object. WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo(); @@ -4280,18 +3953,10 @@ void AArch64FrameLowering::emitRemarks( } unsigned RegTy = StackAccess::AccessType::GPR; - if (MFI.hasScalableStackID(FrameIdx)) { - // SPILL_PPR_TO_ZPR_SLOT_PSEUDO and FILL_PPR_FROM_ZPR_SLOT_PSEUDO - // spill/fill the predicate as a data vector (so are an FPR access). - if (MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO && - MI.getOpcode() != AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO && - AArch64::PPRRegClass.contains(MI.getOperand(0).getReg())) { - RegTy = StackAccess::PPR; - } else - RegTy = StackAccess::FPR; - } else if (AArch64InstrInfo::isFpOrNEON(MI)) { + if (MFI.hasScalableStackID(FrameIdx)) + RegTy = isPPRAccess(MI) ? StackAccess::PPR : StackAccess::FPR; + else if (AArch64InstrInfo::isFpOrNEON(MI)) RegTy = StackAccess::FPR; - } StackAccesses[ArrIdx].AccessTypes |= RegTy; diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp index 5a90da1..b8761d97 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -2579,8 +2579,6 @@ unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) { case AArch64::STZ2Gi: case AArch64::STZGi: case AArch64::TAGPstack: - case AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO: - case AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO: return 2; case AArch64::LD1B_D_IMM: case AArch64::LD1B_H_IMM: @@ -4387,8 +4385,6 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale, MinOffset = -256; MaxOffset = 254; break; - case AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO: - case AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO: case AArch64::LDR_ZXI: case AArch64::STR_ZXI: Scale = TypeSize::getScalable(16); @@ -5098,33 +5094,31 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB, BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg) .addImm(0) .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)); + } else if (Subtarget.hasZeroCycleRegMoveGPR64() && + !Subtarget.hasZeroCycleRegMoveGPR32()) { + // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move. + MCRegister DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32, + &AArch64::GPR64spRegClass); + assert(DestRegX.isValid() && "Destination super-reg not valid"); + MCRegister SrcRegX = + SrcReg == AArch64::WZR + ? AArch64::XZR + : TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32, + &AArch64::GPR64spRegClass); + assert(SrcRegX.isValid() && "Source super-reg not valid"); + // This instruction is reading and writing X registers. This may upset + // the register scavenger and machine verifier, so we need to indicate + // that we are reading an undefined value from SrcRegX, but a proper + // value from SrcReg. 
+ BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX) + .addReg(AArch64::XZR) + .addReg(SrcRegX, RegState::Undef) + .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc)); } else { - if (Subtarget.hasZeroCycleRegMoveGPR64() && - !Subtarget.hasZeroCycleRegMoveGPR32()) { - // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move. - MCRegister DestRegX = TRI->getMatchingSuperReg( - DestReg, AArch64::sub_32, &AArch64::GPR64spRegClass); - assert(DestRegX.isValid() && "Destination super-reg not valid"); - MCRegister SrcRegX = - SrcReg == AArch64::WZR - ? AArch64::XZR - : TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32, - &AArch64::GPR64spRegClass); - assert(SrcRegX.isValid() && "Source super-reg not valid"); - // This instruction is reading and writing X registers. This may upset - // the register scavenger and machine verifier, so we need to indicate - // that we are reading an undefined value from SrcRegX, but a proper - // value from SrcReg. - BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX) - .addReg(AArch64::XZR) - .addReg(SrcRegX, RegState::Undef) - .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc)); - } else { - // Otherwise, expand to ORR WZR. - BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg) - .addReg(AArch64::WZR) - .addReg(SrcReg, getKillRegState(KillSrc)); - } + // Otherwise, expand to ORR WZR. + BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg) + .addReg(AArch64::WZR) + .addReg(SrcReg, getKillRegState(KillSrc)); } return; } @@ -5650,11 +5644,6 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, "Unexpected register store without SVE store instructions"); Opc = AArch64::STR_ZXI; StackID = TargetStackID::ScalableVector; - } else if (AArch64::PPRRegClass.hasSubClassEq(RC)) { - assert(Subtarget.isSVEorStreamingSVEAvailable() && - "Unexpected predicate store without SVE store instructions"); - Opc = AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO; - StackID = TargetStackID::ScalableVector; } break; case 24: @@ -5835,11 +5824,6 @@ void AArch64InstrInfo::loadRegFromStackSlot( "Unexpected register load without SVE load instructions"); Opc = AArch64::LDR_ZXI; StackID = TargetStackID::ScalableVector; - } else if (AArch64::PPRRegClass.hasSubClassEq(RC)) { - assert(Subtarget.isSVEorStreamingSVEAvailable() && - "Unexpected predicate load without SVE load instructions"); - Opc = AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO; - StackID = TargetStackID::ScalableVector; } break; case 24: diff --git a/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp b/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp index aed137c..1568161 100644 --- a/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp +++ b/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp @@ -57,10 +57,7 @@ static bool isPartOfZPRCalleeSaves(MachineBasicBlock::iterator I) { case AArch64::ST1B_2Z_IMM: case AArch64::STR_ZXI: case AArch64::LDR_ZXI: - case AArch64::CPY_ZPzI_B: - case AArch64::CMPNE_PPzZI_B: case AArch64::PTRUE_C_B: - case AArch64::PTRUE_B: return I->getFlag(MachineInstr::FrameSetup) || I->getFlag(MachineInstr::FrameDestroy); case AArch64::SEH_SavePReg: diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.td b/llvm/lib/Target/AArch64/AArch64RegisterInfo.td index 5d89862..ef974df 100644 --- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.td +++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.td @@ -980,19 +980,10 @@ class ZPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size, //****************************************************************************** // SVE predicate 
register classes. - -// Note: This hardware mode is enabled in AArch64Subtarget::getHwModeSet() -// (without the use of the table-gen'd predicates). -def SMEWithZPRPredicateSpills : HwMode<[Predicate<"false">]>; - -def PPRSpillFillRI : RegInfoByHwMode< - [DefaultMode, SMEWithZPRPredicateSpills], - [RegInfo<16,16,16>, RegInfo<16,128,128>]>; - class PPRClass<int firstreg, int lastreg, int step = 1> : RegisterClass<"AArch64", [ nxv16i1, nxv8i1, nxv4i1, nxv2i1, nxv1i1 ], 16, (sequence "P%u", firstreg, lastreg, step)> { - let RegInfos = PPRSpillFillRI; + let Size = 16; } def PPR : PPRClass<0, 15> { diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp index 98e0a11..12ddf47 100644 --- a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp +++ b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp @@ -86,11 +86,6 @@ static cl::alias AArch64StreamingStackHazardSize( cl::desc("alias for -aarch64-streaming-hazard-size"), cl::aliasopt(AArch64StreamingHazardSize)); -static cl::opt<bool> EnableZPRPredicateSpills( - "aarch64-enable-zpr-predicate-spills", cl::init(false), cl::Hidden, - cl::desc( - "Enables spilling/reloading SVE predicates as data vectors (ZPRs)")); - static cl::opt<unsigned> VScaleForTuningOpt("sve-vscale-for-tuning", cl::Hidden, cl::desc("Force a vscale for tuning factor for SVE")); @@ -426,20 +421,6 @@ AArch64Subtarget::AArch64Subtarget(const Triple &TT, StringRef CPU, EnableSubregLiveness = EnableSubregLivenessTracking.getValue(); } -unsigned AArch64Subtarget::getHwModeSet() const { - AArch64HwModeBits Modes = AArch64HwModeBits::DefaultMode; - - // Use a special hardware mode in streaming[-compatible] functions with - // aarch64-enable-zpr-predicate-spills. This changes the spill size (and - // alignment) for the predicate register class. 
- if (EnableZPRPredicateSpills.getValue() && - (isStreaming() || isStreamingCompatible())) { - Modes |= AArch64HwModeBits::SMEWithZPRPredicateSpills; - } - - return to_underlying(Modes); -} - const CallLowering *AArch64Subtarget::getCallLowering() const { return CallLoweringInfo.get(); } diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h index 671df35..8974965 100644 --- a/llvm/lib/Target/AArch64/AArch64Subtarget.h +++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h @@ -130,8 +130,6 @@ public: bool IsStreaming = false, bool IsStreamingCompatible = false, bool HasMinSize = false); - virtual unsigned getHwModeSet() const override; - // Getters for SubtargetFeatures defined in tablegen #define GET_SUBTARGETINFO_MACRO(ATTRIBUTE, DEFAULT, GETTER) \ bool GETTER() const { return ATTRIBUTE; } diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td index be44b8f..33f35ad 100644 --- a/llvm/lib/Target/AArch64/SMEInstrFormats.td +++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td @@ -58,20 +58,6 @@ def FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO : let hasSideEffects = 0; } -def SPILL_PPR_TO_ZPR_SLOT_PSEUDO : - Pseudo<(outs), (ins PPRorPNRAny:$Pt, GPR64sp:$Rn, simm9:$imm9), []>, Sched<[]> -{ - let mayStore = 1; - let hasSideEffects = 0; -} - -def FILL_PPR_FROM_ZPR_SLOT_PSEUDO : - Pseudo<(outs PPRorPNRAny:$Pt), (ins GPR64sp:$Rn, simm9:$imm9), []>, Sched<[]> -{ - let mayLoad = 1; - let hasSideEffects = 0; -} - def SDTZALoadStore : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisPtrTy<1>, SDTCisInt<2>]>; // SME ZA loads and stores def AArch64SMELdr : SDNode<"AArch64ISD::SME_ZA_LDR", SDTZALoadStore, diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td index 9446144..6b3c151 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPU.td +++ b/llvm/lib/Target/AMDGPU/AMDGPU.td @@ -1411,20 +1411,6 @@ def FeatureGloballyAddressableScratch : SubtargetFeature< "FLAT instructions can access scratch memory for any thread in any wave" >; -// FIXME: Remove after all users are migrated to attribute. -def FeatureDynamicVGPR : SubtargetFeature <"dynamic-vgpr", - "DynamicVGPR", - "true", - "Enable dynamic VGPR mode" ->; - -// FIXME: Remove after all users are migrated to attribute. -def FeatureDynamicVGPRBlockSize32 : SubtargetFeature<"dynamic-vgpr-block-size-32", - "DynamicVGPRBlockSize32", - "true", - "Use a block size of 32 for dynamic VGPR allocation (default is 16)" ->; - // Enable the use of SCRATCH_STORE/LOAD_BLOCK instructions for saving and // restoring the callee-saved registers. def FeatureUseBlockVGPROpsForCSR : SubtargetFeature<"block-vgpr-csr", @@ -1462,6 +1448,12 @@ def Feature45BitNumRecordsBufferResource : SubtargetFeature< "45-bit-num-records "The buffer resource (V#) supports 45-bit num_records" >; +def FeatureCluster : SubtargetFeature< "cluster", + "HasCluster", + "true", + "Has cluster support" +>; + // Dummy feature used to disable assembler instructions. 
def FeatureDisable : SubtargetFeature<"", "FeatureDisable","true", @@ -2128,6 +2120,7 @@ def FeatureISAVersion12_50 : FeatureSet< Feature45BitNumRecordsBufferResource, FeatureSupportsXNACK, FeatureXNACK, + FeatureCluster, ]>; def FeatureISAVersion12_51 : FeatureSet< diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h index a54d665..879bf5a 100644 --- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h +++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h @@ -288,6 +288,8 @@ protected: bool Has45BitNumRecordsBufferResource = false; + bool HasCluster = false; + // Dummy feature to use for assembler in tablegen. bool FeatureDisable = false; @@ -1837,7 +1839,7 @@ public: } /// \returns true if the subtarget supports clusters of workgroups. - bool hasClusters() const { return GFX1250Insts; } + bool hasClusters() const { return HasCluster; } /// \returns true if the subtarget requires a wait for xcnt before atomic /// flat/global stores & rmw. diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp index 20fa141..f7f4d46 100644 --- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp +++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp @@ -1353,11 +1353,6 @@ unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI, if (DynamicVGPRBlockSize != 0) return DynamicVGPRBlockSize; - // Temporarily check the subtarget feature, until we fully switch to using - // attributes. - if (STI->getFeatureBits().test(FeatureDynamicVGPR)) - return STI->getFeatureBits().test(FeatureDynamicVGPRBlockSize32) ? 32 : 16; - bool IsWave32 = EnableWavefrontSize32 ? *EnableWavefrontSize32 : STI->getFeatureBits().test(FeatureWavefrontSize32); @@ -1412,10 +1407,7 @@ unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI, if (Features.test(FeatureGFX90AInsts)) return 512; - // Temporarily check the subtarget feature, until we fully switch to using - // attributes. - if (DynamicVGPRBlockSize != 0 || - STI->getFeatureBits().test(FeatureDynamicVGPR)) + if (DynamicVGPRBlockSize != 0) // On GFX12 we can allocate at most 8 blocks of VGPRs. 
return 8 * getVGPRAllocGranule(STI, DynamicVGPRBlockSize); return getAddressableNumArchVGPRs(STI); diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp index f9bdc09..77913f2 100644 --- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp +++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp @@ -149,6 +149,9 @@ void NVPTXInstPrinter::printCvtMode(const MCInst *MI, int OpNum, raw_ostream &O, case NVPTX::PTXCvtMode::RNA: O << ".rna"; return; + case NVPTX::PTXCvtMode::RS: + O << ".rs"; + return; } } llvm_unreachable("Invalid conversion modifier"); diff --git a/llvm/lib/Target/NVPTX/NVPTX.h b/llvm/lib/Target/NVPTX/NVPTX.h index 77a0e03..1e0f747 100644 --- a/llvm/lib/Target/NVPTX/NVPTX.h +++ b/llvm/lib/Target/NVPTX/NVPTX.h @@ -207,6 +207,7 @@ enum CvtMode { RM, RP, RNA, + RS, BASE_MASK = 0x0F, FTZ_FLAG = 0x10, diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp index 8c21746..bc047a4a 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -1096,9 +1096,10 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, // Enable custom lowering for the following: // * MVT::i128 - clusterlaunchcontrol // * MVT::i32 - prmt + // * MVT::v4f32 - cvt_rs fp{4/6/8}x4 intrinsics // * MVT::Other - internal.addrspace.wrap - setOperationAction(ISD::INTRINSIC_WO_CHAIN, {MVT::i32, MVT::i128, MVT::Other}, - Custom); + setOperationAction(ISD::INTRINSIC_WO_CHAIN, + {MVT::i32, MVT::i128, MVT::v4f32, MVT::Other}, Custom); } const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const { @@ -1181,6 +1182,11 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const { NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT) MAKE_CASE( NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT) + MAKE_CASE(NVPTXISD::CVT_E4M3X4_F32X4_RS_SF) + MAKE_CASE(NVPTXISD::CVT_E5M2X4_F32X4_RS_SF) + MAKE_CASE(NVPTXISD::CVT_E2M3X4_F32X4_RS_SF) + MAKE_CASE(NVPTXISD::CVT_E3M2X4_F32X4_RS_SF) + MAKE_CASE(NVPTXISD::CVT_E2M1X4_F32X4_RS_SF) } return nullptr; @@ -2903,6 +2909,61 @@ static SDValue LowerClusterLaunchControlQueryCancel(SDValue Op, {TryCancelResponse0, TryCancelResponse1}); } +static SDValue lowerCvtRSIntrinsics(SDValue Op, SelectionDAG &DAG) { + SDNode *N = Op.getNode(); + SDLoc DL(N); + SDValue F32Vec = N->getOperand(1); + SDValue RBits = N->getOperand(2); + + unsigned IntrinsicID = N->getConstantOperandVal(0); + + // Extract the 4 float elements from the vector + SmallVector<SDValue, 6> Ops; + for (unsigned i = 0; i < 4; ++i) + Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, F32Vec, + DAG.getIntPtrConstant(i, DL))); + + using NVPTX::PTXCvtMode::CvtMode; + + auto [OpCode, RetTy, CvtModeFlag] = + [&]() -> std::tuple<NVPTXISD::NodeType, MVT::SimpleValueType, uint32_t> { + switch (IntrinsicID) { + case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite: + return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8, + CvtMode::RS | CvtMode::RELU_FLAG}; + case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite: + return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS}; + case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite: + return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8, + CvtMode::RS | CvtMode::RELU_FLAG}; + case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite: + return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS}; + case 
Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite: + return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8, + CvtMode::RS | CvtMode::RELU_FLAG}; + case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite: + return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS}; + case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite: + return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8, + CvtMode::RS | CvtMode::RELU_FLAG}; + case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite: + return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS}; + case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite: + return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16, + CvtMode::RS | CvtMode::RELU_FLAG}; + case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite: + return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16, CvtMode::RS}; + default: + llvm_unreachable("unsupported/unhandled intrinsic"); + } + }(); + + Ops.push_back(RBits); + Ops.push_back(DAG.getConstant(CvtModeFlag, DL, MVT::i32)); + + return DAG.getNode(OpCode, DL, RetTy, Ops); +} + static SDValue lowerPrmtIntrinsic(SDValue Op, SelectionDAG &DAG) { const unsigned Mode = [&]() { switch (Op->getConstantOperandVal(0)) { @@ -2972,6 +3033,17 @@ static SDValue lowerIntrinsicWOChain(SDValue Op, SelectionDAG &DAG) { case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y: case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z: return LowerClusterLaunchControlQueryCancel(Op, DAG); + case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite: + case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite: + case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite: + case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite: + case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite: + case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite: + case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite: + case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite: + case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite: + case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite: + return lowerCvtRSIntrinsics(Op, DAG); } } diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h index 769d2fe..63fa0bb 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h @@ -79,6 +79,11 @@ enum NodeType : unsigned { CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_X, CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Y, CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Z, + CVT_E4M3X4_F32X4_RS_SF, + CVT_E5M2X4_F32X4_RS_SF, + CVT_E2M3X4_F32X4_RS_SF, + CVT_E3M2X4_F32X4_RS_SF, + CVT_E2M1X4_F32X4_RS_SF, FIRST_MEMORY_OPCODE, diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td index 4cacee2..6c14cf0 100644 --- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td +++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td @@ -34,7 +34,8 @@ def CvtRN : PatLeaf<(i32 0x5)>; def CvtRZ : PatLeaf<(i32 0x6)>; def CvtRM : PatLeaf<(i32 0x7)>; def CvtRP : PatLeaf<(i32 0x8)>; -def CvtRNA : PatLeaf<(i32 0x9)>; +def CvtRNA : PatLeaf<(i32 0x9)>; +def CvtRS : PatLeaf<(i32 0xA)>; def CvtNONE_FTZ : PatLeaf<(i32 0x10)>; def CvtRNI_FTZ : PatLeaf<(i32 0x11)>; @@ -50,8 +51,9 @@ def CvtSAT : PatLeaf<(i32 0x20)>; def CvtSAT_FTZ : PatLeaf<(i32 0x30)>; def CvtNONE_RELU : PatLeaf<(i32 0x40)>; -def CvtRN_RELU : PatLeaf<(i32 0x45)>; -def CvtRZ_RELU : PatLeaf<(i32 0x46)>; +def CvtRN_RELU : PatLeaf<(i32 0x45)>; +def CvtRZ_RELU : PatLeaf<(i32 0x46)>; +def CvtRS_RELU : PatLeaf<(i32 0x4A)>; def CvtMode : Operand<i32> { let PrintMethod = 
"printCvtMode"; @@ -133,6 +135,11 @@ def hasSM100a : Predicate<"Subtarget->getSmVersion() == 100 && Subtarget->hasArc def hasSM101a : Predicate<"Subtarget->getSmVersion() == 101 && Subtarget->hasArchAccelFeatures()">; def hasSM120a : Predicate<"Subtarget->getSmVersion() == 120 && Subtarget->hasArchAccelFeatures()">; +def hasSM100aOrSM103a : + Predicate<"(Subtarget->getSmVersion() == 100 || " # + "Subtarget->getSmVersion() == 103) " # + "&& Subtarget->hasArchAccelFeatures()">; + // non-sync shfl instructions are not available on sm_70+ in PTX6.4+ def hasSHFL : Predicate<"!(Subtarget->getSmVersion() >= 70" "&& Subtarget->getPTXVersion() >= 64)">; @@ -593,6 +600,23 @@ let hasSideEffects = false in { defm CVT_f16x2 : CVT_FROM_FLOAT_V2_SM80<"f16x2", B32>; defm CVT_bf16x2 : CVT_FROM_FLOAT_V2_SM80<"bf16x2", B32>; + + multiclass CVT_FROM_FLOAT_V2_RS<string FromName, RegisterClass RC> { + def _f32_rs : + BasicFlagsNVPTXInst<(outs RC:$dst), + (ins B32:$src1, B32:$src2, B32:$src3), + (ins CvtMode:$mode), + "cvt${mode:base}${mode:relu}." # FromName # ".f32">; + + def _f32_rs_sf : + BasicFlagsNVPTXInst<(outs RC:$dst), + (ins B32:$src1, B32:$src2, B32:$src3), + (ins CvtMode:$mode), + "cvt${mode:base}${mode:relu}.satfinite." # FromName # ".f32">; + } + + defm CVT_f16x2 : CVT_FROM_FLOAT_V2_RS<"f16x2", B32>; + defm CVT_bf16x2 : CVT_FROM_FLOAT_V2_RS<"bf16x2", B32>; // FP8 conversions. multiclass CVT_TO_F8X2<string F8Name> { @@ -619,6 +643,15 @@ let hasSideEffects = false in { def CVT_f16x2_e4m3x2 : CVT_f16x2_fp8<"e4m3">; def CVT_f16x2_e5m2x2 : CVT_f16x2_fp8<"e5m2">; + + class CVT_TO_FP8X4<string F8Name> : + NVPTXInst<(outs B32:$dst), + (ins B32:$src1, B32:$src2, B32:$src3, B32:$src4, B32:$src5, CvtMode:$mode), + "cvt${mode:base}${mode:relu}.satfinite." # F8Name # + "x4.f32 \t$dst, {{$src1, $src2, $src3, $src4}}, $src5;">; + + def CVT_e4m3x4_f32x4_rs_sf : CVT_TO_FP8X4<"e4m3">; + def CVT_e5m2x4_f32x4_rs_sf : CVT_TO_FP8X4<"e5m2">; // Float to TF32 conversions multiclass CVT_TO_TF32<string Modifier, list<Predicate> Preds = [hasPTX<78>, hasSM<90>]> { @@ -652,6 +685,15 @@ let hasSideEffects = false in { "cvt${mode:base}${mode:relu}.f16x2." # type>; } + class CVT_TO_FP6X4<string F6Name> : + NVPTXInst<(outs B32:$dst), + (ins B32:$src1, B32:$src2, B32:$src3, B32:$src4, B32:$src5, CvtMode:$mode), + "cvt${mode:base}${mode:relu}.satfinite." # F6Name # + "x4.f32 \t$dst, {{$src1, $src2, $src3, $src4}}, $src5;">; + + def CVT_e2m3x4_f32x4_rs_sf : CVT_TO_FP6X4<"e2m3">; + def CVT_e3m2x4_f32x4_rs_sf : CVT_TO_FP6X4<"e3m2">; + // FP4 conversions. def CVT_e2m1x2_f32_sf : NVPTXInst<(outs B16:$dst), (ins B32:$src1, B32:$src2, CvtMode:$mode), @@ -668,6 +710,12 @@ let hasSideEffects = false in { "cvt.u8.u16 \t%e2m1x2_in, $src; \n\t", "cvt${mode:base}${mode:relu}.f16x2.e2m1x2 \t$dst, %e2m1x2_in; \n\t", "}}"), []>; + + def CVT_e2m1x4_f32x4_rs_sf : + NVPTXInst<(outs B16:$dst), + (ins B32:$src1, B32:$src2, B32:$src3, B32:$src4, B32:$src5, CvtMode:$mode), + "cvt${mode:base}${mode:relu}.satfinite.e2m1x4.f32 \t" # + "$dst, {{$src1, $src2, $src3, $src4}}, $src5;">; // UE8M0x2 conversions. 
class CVT_f32_to_ue8m0x2<string sat = ""> : diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td index e91171c..a8b854f 100644 --- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td +++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td @@ -1782,11 +1782,32 @@ def : Pat<(int_nvvm_ff2bf16x2_rn_relu f32:$a, f32:$b), (CVT_bf16x2_f32 $a, $b, C def : Pat<(int_nvvm_ff2bf16x2_rz f32:$a, f32:$b), (CVT_bf16x2_f32 $a, $b, CvtRZ)>; def : Pat<(int_nvvm_ff2bf16x2_rz_relu f32:$a, f32:$b), (CVT_bf16x2_f32 $a, $b, CvtRZ_RELU)>; +let Predicates = [hasPTX<87>, hasSM100aOrSM103a] in { +def : Pat<(int_nvvm_ff2bf16x2_rs f32:$a, f32:$b, i32:$c), + (CVT_bf16x2_f32_rs $a, $b, $c, CvtRS)>; +def : Pat<(int_nvvm_ff2bf16x2_rs_relu f32:$a, f32:$b, i32:$c), + (CVT_bf16x2_f32_rs $a, $b, $c, CvtRS_RELU)>; +def : Pat<(int_nvvm_ff2bf16x2_rs_satfinite f32:$a, f32:$b, i32:$c), + (CVT_bf16x2_f32_rs_sf $a, $b, $c, CvtRS)>; +def : Pat<(int_nvvm_ff2bf16x2_rs_relu_satfinite f32:$a, f32:$b, i32:$c), + (CVT_bf16x2_f32_rs_sf $a, $b, $c, CvtRS_RELU)>; +} + def : Pat<(int_nvvm_ff2f16x2_rn f32:$a, f32:$b), (CVT_f16x2_f32 $a, $b, CvtRN)>; def : Pat<(int_nvvm_ff2f16x2_rn_relu f32:$a, f32:$b), (CVT_f16x2_f32 $a, $b, CvtRN_RELU)>; def : Pat<(int_nvvm_ff2f16x2_rz f32:$a, f32:$b), (CVT_f16x2_f32 $a, $b, CvtRZ)>; def : Pat<(int_nvvm_ff2f16x2_rz_relu f32:$a, f32:$b), (CVT_f16x2_f32 $a, $b, CvtRZ_RELU)>; +let Predicates = [hasPTX<87>, hasSM100aOrSM103a] in { +def : Pat<(int_nvvm_ff2f16x2_rs f32:$a, f32:$b, i32:$c), + (CVT_f16x2_f32_rs $a, $b, $c, CvtRS)>; +def : Pat<(int_nvvm_ff2f16x2_rs_relu f32:$a, f32:$b, i32:$c), + (CVT_f16x2_f32_rs $a, $b, $c, CvtRS_RELU)>; +def : Pat<(int_nvvm_ff2f16x2_rs_satfinite f32:$a, f32:$b, i32:$c), + (CVT_f16x2_f32_rs_sf $a, $b, $c, CvtRS)>; +def : Pat<(int_nvvm_ff2f16x2_rs_relu_satfinite f32:$a, f32:$b, i32:$c), + (CVT_f16x2_f32_rs_sf $a, $b, $c, CvtRS_RELU)>; +} def : Pat<(int_nvvm_f2bf16_rn f32:$a), (CVT_bf16_f32 $a, CvtRN)>; def : Pat<(int_nvvm_f2bf16_rn_relu f32:$a), (CVT_bf16_f32 $a, CvtRN_RELU)>; def : Pat<(int_nvvm_f2bf16_rz f32:$a), (CVT_bf16_f32 $a, CvtRZ)>; @@ -1929,6 +1950,52 @@ let Predicates = [hasPTX<86>, hasSM<100>, hasArchAccelFeatures] in { (CVT_bf16x2_ue8m0x2 $a)>; } +def SDT_CVT_F32X4_TO_FPX4_RS_VEC : + SDTypeProfile<1, 6, [SDTCisVec<0>, SDTCisFP<1>, SDTCisFP<2>, SDTCisFP<3>, + SDTCisFP<4>, SDTCisInt<5>, SDTCisInt<6>]>; + +def SDT_CVT_F32X4_TO_FPX4_RS_INT : + SDTypeProfile<1, 6, [SDTCisInt<0>, SDTCisFP<1>, SDTCisFP<2>, SDTCisFP<3>, + SDTCisFP<4>, SDTCisInt<5>, SDTCisInt<6>]>; + +class CVT_F32X4_TO_FPX4_RS_SF_NODE<string FPName, SDTypeProfile SDT> : + SDNode<"NVPTXISD::CVT_" # FPName # "X4_F32X4_RS_SF", SDT, []>; + +multiclass CVT_F32X4_TO_FPX4_RS_SF_VEC<string FPName, VTVec RetTy> { + def : Pat<(RetTy (CVT_F32X4_TO_FPX4_RS_SF_NODE<!toupper(FPName), + SDT_CVT_F32X4_TO_FPX4_RS_VEC> + f32:$f1, f32:$f2, f32:$f3, f32:$f4, i32:$rbits, CvtRS)), + (!cast<NVPTXInst>("CVT_" # FPName # "x4_f32x4_rs_sf") + $f1, $f2, $f3, $f4, $rbits, CvtRS)>; + + def : Pat<(RetTy (CVT_F32X4_TO_FPX4_RS_SF_NODE<!toupper(FPName), + SDT_CVT_F32X4_TO_FPX4_RS_VEC> + f32:$f1, f32:$f2, f32:$f3, f32:$f4, i32:$rbits, CvtRS_RELU)), + (!cast<NVPTXInst>("CVT_" # FPName # "x4_f32x4_rs_sf") + $f1, $f2, $f3, $f4, $rbits, CvtRS_RELU)>; +} + +// RS rounding mode conversions +let Predicates = [hasPTX<87>, hasSM100aOrSM103a] in { +// FP8x4 conversions +defm : CVT_F32X4_TO_FPX4_RS_SF_VEC<"e4m3", v4i8>; +defm : CVT_F32X4_TO_FPX4_RS_SF_VEC<"e5m2", v4i8>; + +// FP6x4 conversions +defm : 
CVT_F32X4_TO_FPX4_RS_SF_VEC<"e2m3", v4i8>; +defm : CVT_F32X4_TO_FPX4_RS_SF_VEC<"e3m2", v4i8>; + +// FP4x4 conversions +def : Pat<(i16 (CVT_F32X4_TO_FPX4_RS_SF_NODE<"E2M1", + SDT_CVT_F32X4_TO_FPX4_RS_INT> + f32:$f1, f32:$f2, f32:$f3, f32:$f4, i32:$rbits, CvtRS)), + (CVT_e2m1x4_f32x4_rs_sf $f1, $f2, $f3, $f4, $rbits, CvtRS)>; +def : Pat<(i16 (CVT_F32X4_TO_FPX4_RS_SF_NODE<"E2M1", + SDT_CVT_F32X4_TO_FPX4_RS_INT> + f32:$f1, f32:$f2, f32:$f3, f32:$f4, i32:$rbits, CvtRS_RELU)), + (CVT_e2m1x4_f32x4_rs_sf $f1, $f2, $f3, $f4, $rbits, CvtRS_RELU)>; +} + // // FNS // @@ -4461,6 +4528,10 @@ class WMMA_REGINFO<WMMA_REGS r, string op, string metadata = "", string kind = " !eq(ptx_elt_type, "e2m1"), !ne(kind, "")) : [hasSM120a, hasPTX<87>], + !and(!or(!eq(ptx_elt_type,"e4m3"), + !eq(ptx_elt_type,"e5m2")), + !eq(geom, "m16n8k16")) : [hasSM<89>, hasPTX<87>], + !or(!eq(ptx_elt_type, "e4m3"), !eq(ptx_elt_type, "e5m2")) : [hasSM<89>, hasPTX<84>], @@ -4476,6 +4547,11 @@ class WMMA_REGINFO<WMMA_REGS r, string op, string metadata = "", string kind = " !and(!eq(geom, "m8n8k4"), !eq(ptx_elt_type, "f64")) : [hasSM<80>, hasPTX<70>], + !and(!or(!eq(geom, "m16n8k4"), + !eq(geom, "m16n8k8"), + !eq(geom, "m16n8k16")), + !eq(ptx_elt_type, "f64")) : [hasSM<90>, hasPTX<78>], + // fp16 -> fp16/fp32 @ m8n32k16/m32n8k16 !and(!or(!eq(geom, "m8n32k16"), !eq(geom, "m32n8k16")), @@ -4760,8 +4836,8 @@ defset list<WMMA_INSTR> WMMAs = { // MMA class MMA<WMMA_REGINFO FragA, WMMA_REGINFO FragB, WMMA_REGINFO FragC, WMMA_REGINFO FragD, - string ALayout, string BLayout, int Satfinite, string b1op> - : WMMA_INSTR<MMA_NAME<ALayout, BLayout, Satfinite, b1op, FragA, FragB, FragC, FragD>.record, + string ALayout, string BLayout, int Satfinite, string b1op, string Kind> + : WMMA_INSTR<MMA_NAME<ALayout, BLayout, Satfinite, b1op, Kind, FragA, FragB, FragC, FragD>.record, [FragA.Ins, FragB.Ins, FragC.Ins]>, // Requires does not seem to have effect on Instruction w/o Patterns. // We set it here anyways and propagate to the Pat<> we construct below. @@ -4776,6 +4852,7 @@ class MMA<WMMA_REGINFO FragA, WMMA_REGINFO FragB, # FragA.geom # "." # ALayout # "." # BLayout + # !if(!ne(Kind, ""), "." 
# Kind, "") # !if(Satfinite, ".satfinite", "") # TypeList # b1op # "\n\t\t" @@ -4792,13 +4869,15 @@ defset list<WMMA_INSTR> MMAs = { foreach satf = [0, 1] in { foreach op = NVVM_MMA_OPS.all_mma_ops in { foreach b1op = NVVM_MMA_B1OPS<op>.ret in { - if NVVM_MMA_SUPPORTED<op, layout_a, layout_b, satf>.ret then { - def : MMA<WMMA_REGINFO<op[0], "mma">, - WMMA_REGINFO<op[1], "mma">, - WMMA_REGINFO<op[2], "mma">, - WMMA_REGINFO<op[3], "mma">, - layout_a, layout_b, satf, b1op>; - } + foreach kind = ["", "kind::f8f6f4"] in { + if NVVM_MMA_SUPPORTED<op, layout_a, layout_b, kind, satf>.ret then { + def : MMA<WMMA_REGINFO<op[0], "mma", "", kind>, + WMMA_REGINFO<op[1], "mma", "", kind>, + WMMA_REGINFO<op[2], "mma", "", kind>, + WMMA_REGINFO<op[3], "mma", "", kind>, + layout_a, layout_b, satf, b1op, kind>; + } + } // kind } // b1op } // op } // satf diff --git a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp index 3090ad3..27fba34 100644 --- a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp +++ b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp @@ -407,6 +407,7 @@ bool X86InstructionSelector::select(MachineInstr &I) { case TargetOpcode::G_TRUNC: return selectTruncOrPtrToInt(I, MRI, MF); case TargetOpcode::G_INTTOPTR: + case TargetOpcode::G_FREEZE: return selectCopy(I, MRI); case TargetOpcode::G_ZEXT: return selectZext(I, MRI, MF); diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp index e7709ef..11ef721 100644 --- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp +++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp @@ -89,9 +89,29 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI, // 32/64-bits needs support for s64/s128 to handle cases: // s64 = EXTEND (G_IMPLICIT_DEF s32) -> s64 = G_IMPLICIT_DEF // s128 = EXTEND (G_IMPLICIT_DEF s32/s64) -> s128 = G_IMPLICIT_DEF - getActionDefinitionsBuilder(G_IMPLICIT_DEF) + getActionDefinitionsBuilder( + {G_IMPLICIT_DEF, G_PHI, G_FREEZE, G_CONSTANT_FOLD_BARRIER}) .legalFor({p0, s1, s8, s16, s32, s64}) - .legalFor(Is64Bit, {s128}); + .legalFor(UseX87, {s80}) + .legalFor(Is64Bit, {s128}) + .legalFor(HasSSE2, {v16s8, v8s16, v4s32, v2s64}) + .legalFor(HasAVX, {v32s8, v16s16, v8s32, v4s64}) + .legalFor(HasAVX512, {v64s8, v32s16, v16s32, v8s64}) + .widenScalarOrEltToNextPow2(0, /*Min=*/8) + .clampScalarOrElt(0, s8, sMaxScalar) + .moreElementsToNextPow2(0) + .clampMinNumElements(0, s8, 16) + .clampMinNumElements(0, s16, 8) + .clampMinNumElements(0, s32, 4) + .clampMinNumElements(0, s64, 2) + .clampMaxNumElements(0, s8, HasAVX512 ? 64 : (HasAVX ? 32 : 16)) + .clampMaxNumElements(0, s16, HasAVX512 ? 32 : (HasAVX ? 16 : 8)) + .clampMaxNumElements(0, s32, HasAVX512 ? 16 : (HasAVX ? 8 : 4)) + .clampMaxNumElements(0, s64, HasAVX512 ? 8 : (HasAVX ? 4 : 2)) + .clampMaxNumElements(0, p0, + Is64Bit ? 
s64MaxVector.getNumElements() + : s32MaxVector.getNumElements()) + .scalarizeIf(scalarOrEltWiderThan(0, 64), 0); getActionDefinitionsBuilder(G_CONSTANT) .legalFor({p0, s8, s16, s32}) @@ -289,26 +309,6 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI, .clampScalar(1, s16, sMaxScalar) .scalarSameSizeAs(0, 1); - // control flow - getActionDefinitionsBuilder(G_PHI) - .legalFor({s8, s16, s32, p0}) - .legalFor(UseX87, {s80}) - .legalFor(Is64Bit, {s64}) - .legalFor(HasSSE1, {v16s8, v8s16, v4s32, v2s64}) - .legalFor(HasAVX, {v32s8, v16s16, v8s32, v4s64}) - .legalFor(HasAVX512, {v64s8, v32s16, v16s32, v8s64}) - .clampMinNumElements(0, s8, 16) - .clampMinNumElements(0, s16, 8) - .clampMinNumElements(0, s32, 4) - .clampMinNumElements(0, s64, 2) - .clampMaxNumElements(0, s8, HasAVX512 ? 64 : (HasAVX ? 32 : 16)) - .clampMaxNumElements(0, s16, HasAVX512 ? 32 : (HasAVX ? 16 : 8)) - .clampMaxNumElements(0, s32, HasAVX512 ? 16 : (HasAVX ? 8 : 4)) - .clampMaxNumElements(0, s64, HasAVX512 ? 8 : (HasAVX ? 4 : 2)) - .widenScalarToNextPow2(0, /*Min=*/32) - .clampScalar(0, s8, sMaxScalar) - .scalarize(0); - getActionDefinitionsBuilder(G_BRCOND).legalFor({s1}); // pointer handling @@ -592,11 +592,6 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI, .minScalar(0, LLT::scalar(32)) .libcall(); - getActionDefinitionsBuilder({G_FREEZE, G_CONSTANT_FOLD_BARRIER}) - .legalFor({s8, s16, s32, s64, p0}) - .widenScalarToNextPow2(0, /*Min=*/8) - .clampScalar(0, s8, sMaxScalar); - getLegacyLegalizerInfo().computeTables(); verify(*STI.getInstrInfo()); } diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td index 564810c..83bd6ac 100644 --- a/llvm/lib/Target/X86/X86InstrAVX512.td +++ b/llvm/lib/Target/X86/X86InstrAVX512.td @@ -662,6 +662,7 @@ def VINSERTPSZrri : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst), "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, timm:$src3))]>, EVEX, VVVV, Sched<[SchedWriteFShuffle.XMM]>; +let mayLoad = 1 in def VINSERTPSZrmi : AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst), (ins VR128X:$src1, f32mem:$src2, u8imm:$src3), "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", @@ -1293,6 +1294,7 @@ multiclass avx512_subvec_broadcast_rm<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode, X86VectorVTInfo _Dst, X86VectorVTInfo _Src> { + let hasSideEffects = 0, mayLoad = 1 in defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst), (ins _Src.MemOp:$src), OpcodeStr, "$src", "$src", (_Dst.VT (OpNode addr:$src))>, @@ -1748,6 +1750,7 @@ let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in { (_.VT (X86VPermt2 _.RC:$src1, IdxVT.RC:$src2, _.RC:$src3)), 1>, EVEX, VVVV, AVX5128IBase, Sched<[sched]>; + let hasSideEffects = 0, mayLoad = 1 in defm rm: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins IdxVT.RC:$src2, _.MemOp:$src3), OpcodeStr, "$src3, $src2", "$src2, $src3", @@ -1759,7 +1762,7 @@ let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in { multiclass avx512_perm_t_mb<bits<8> opc, string OpcodeStr, X86FoldableSchedWrite sched, X86VectorVTInfo _, X86VectorVTInfo IdxVT> { - let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in + let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain, hasSideEffects = 0, mayLoad = 1 in defm rmb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins IdxVT.RC:$src2, _.ScalarMemOp:$src3), OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"), @@ -1987,6 +1990,7 
@@ multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeSAE, _.FRC:$src2, timm:$cc))]>, EVEX, VVVV, VEX_LIG, Sched<[sched]>, SIMD_EXC; + let mayLoad = 1 in def rmi : AVX512Ii8<0xC2, MRMSrcMem, (outs _.KRC:$dst), (ins _.FRC:$src1, _.ScalarMemOp:$src2, u8imm:$cc), @@ -2145,6 +2149,7 @@ multiclass avx512_icmp_cc<bits<8> opc, string Suffix, PatFrag Frag, (_.VT _.RC:$src2), cond)))]>, EVEX, VVVV, Sched<[sched]>; + let mayLoad = 1 in def rmi : AVX512AIi8<opc, MRMSrcMem, (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, u8imm:$cc), !strconcat("vpcmp", Suffix, @@ -2167,6 +2172,7 @@ multiclass avx512_icmp_cc<bits<8> opc, string Suffix, PatFrag Frag, (_.VT _.RC:$src2), cond))))]>, EVEX, VVVV, EVEX_K, Sched<[sched]>; + let mayLoad = 1 in def rmik : AVX512AIi8<opc, MRMSrcMem, (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2, u8imm:$cc), @@ -2198,6 +2204,7 @@ multiclass avx512_icmp_cc_rmb<bits<8> opc, string Suffix, PatFrag Frag, PatFrag Frag_su, X86FoldableSchedWrite sched, X86VectorVTInfo _, string Name> : avx512_icmp_cc<opc, Suffix, Frag, Frag_su, sched, _, Name> { + let mayLoad = 1 in { def rmbi : AVX512AIi8<opc, MRMSrcMem, (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$cc), @@ -2221,6 +2228,7 @@ multiclass avx512_icmp_cc_rmb<bits<8> opc, string Suffix, PatFrag Frag, (_.BroadcastLdFrag addr:$src2), cond))))]>, EVEX, VVVV, EVEX_K, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; + } def : Pat<(_.KVT (Frag:$cc (_.BroadcastLdFrag addr:$src2), (_.VT _.RC:$src1), cond)), @@ -2305,6 +2313,7 @@ let Uses = [MXCSR], mayRaiseFPException = 1 in { (X86cmpm_su (_.VT _.RC:$src1), (_.VT _.RC:$src2), timm:$cc), 1>, Sched<[sched]>; + let mayLoad = 1 in { defm rmi : AVX512_maskable_cmp<0xC2, MRMSrcMem, _, (outs _.KRC:$dst),(ins _.RC:$src1, _.MemOp:$src2, u8imm:$cc), "vcmp"#_.Suffix, @@ -2329,6 +2338,7 @@ let Uses = [MXCSR], mayRaiseFPException = 1 in { timm:$cc)>, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; } + } // Patterns for selecting with loads in other operand. def : Pat<(X86any_cmpm (_.LdFrag addr:$src2), (_.VT _.RC:$src1), @@ -3771,6 +3781,7 @@ def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src [(set VR128X:$dst, (v4i32 (scalar_to_vector GR32:$src)))]>, EVEX, Sched<[WriteVecMoveFromGpr]>; +let mayLoad = 1 in def VMOVDI2PDIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src), "vmovd\t{$src, $dst|$dst, $src}", [(set VR128X:$dst, @@ -3874,7 +3885,7 @@ def VMOVSS2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst), // Move Quadword Int to Packed Quadword Int // -let ExeDomain = SSEPackedInt in { +let ExeDomain = SSEPackedInt, mayLoad = 1, hasSideEffects = 0 in { def VMOVQI2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst), (ins i64mem:$src), "vmovq\t{$src, $dst|$dst, $src}", @@ -3930,13 +3941,13 @@ multiclass avx512_move_scalar<string asm, SDNode OpNode, PatFrag vzload_frag, (_.VT (OpNode _.RC:$src1, _.RC:$src2)), (_.VT _.RC:$src0))))], _.ExeDomain>, EVEX, VVVV, EVEX_K, Sched<[SchedWriteFShuffle.XMM]>; - let canFoldAsLoad = 1, isReMaterializable = 1 in { + let canFoldAsLoad = 1, isReMaterializable = 1, mayLoad = 1, hasSideEffects = 0 in { def rm : AVX512PI<0x10, MRMSrcMem, (outs _.RC:$dst), (ins _.ScalarMemOp:$src), !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [(set _.RC:$dst, (_.VT (vzload_frag addr:$src)))], _.ExeDomain>, EVEX, Sched<[WriteFLoad]>; // _alt version uses FR32/FR64 register class. 
- let isCodeGenOnly = 1 in + let isCodeGenOnly = 1, mayLoad = 1, hasSideEffects = 0 in def rm_alt : AVX512PI<0x10, MRMSrcMem, (outs _.FRC:$dst), (ins _.ScalarMemOp:$src), !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [(set _.FRC:$dst, (_.ScalarLdFrag addr:$src))], @@ -4557,6 +4568,7 @@ let Predicates = [HasAVX512] in { // AVX-512 - Non-temporals //===----------------------------------------------------------------------===// +let mayLoad = 1, hasSideEffects = 0 in { def VMOVNTDQAZrm : AVX512PI<0x2A, MRMSrcMem, (outs VR512:$dst), (ins i512mem:$src), "vmovntdqa\t{$src, $dst|$dst, $src}", [], SSEPackedInt>, Sched<[SchedWriteVecMoveLSNT.ZMM.RM]>, @@ -4575,11 +4587,12 @@ let Predicates = [HasVLX] in { [], SSEPackedInt>, Sched<[SchedWriteVecMoveLSNT.XMM.RM]>, EVEX, T8, PD, EVEX_V128, EVEX_CD8<64, CD8VF>; } +} multiclass avx512_movnt<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, X86SchedWriteMoveLS Sched, PatFrag st_frag = alignednontemporalstore> { - let SchedRW = [Sched.MR], AddedComplexity = 400 in + let mayStore = 1, SchedRW = [Sched.MR], AddedComplexity = 400 in def mr : AVX512PI<opc, MRMDestMem, (outs), (ins _.MemOp:$dst, _.RC:$src), !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [(st_frag (_.VT _.RC:$src), addr:$dst)], @@ -4682,6 +4695,7 @@ multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, IsCommutable, IsCommutable>, AVX512BIBase, EVEX, VVVV, Sched<[sched]>; + let mayLoad = 1, hasSideEffects = 0 in defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", @@ -4694,6 +4708,7 @@ multiclass avx512_binop_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode, X86VectorVTInfo _, X86FoldableSchedWrite sched, bit IsCommutable = 0> : avx512_binop_rm<opc, OpcodeStr, OpNode, _, sched, IsCommutable> { + let mayLoad = 1, hasSideEffects = 0 in defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr, "${src2}"#_.BroadcastStr#", $src1", @@ -4811,6 +4826,7 @@ multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, (_Src.VT _Src.RC:$src2))), IsCommutable>, AVX512BIBase, EVEX, VVVV, Sched<[sched]>; + let mayLoad = 1, hasSideEffects = 0 in { defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst), (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", @@ -4828,6 +4844,7 @@ multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, (_Brdct.VT (_Brdct.BroadcastLdFrag addr:$src2)))))>, AVX512BIBase, EVEX, VVVV, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; + } } defm VPADD : avx512_binop_rm_vl_all<0xFC, 0xFD, 0xFE, 0xD4, "vpadd", add, @@ -4893,6 +4910,7 @@ defm VPMULTISHIFTQB : avx512_binop_all<0x83, "vpmultishiftqb", SchedWriteVecALU, multiclass avx512_packs_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode, X86VectorVTInfo _Src, X86VectorVTInfo _Dst, X86FoldableSchedWrite sched> { + let mayLoad = 1, hasSideEffects = 0 in defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst), (ins _Src.RC:$src1, _Src.ScalarMemOp:$src2), OpcodeStr, @@ -4916,6 +4934,7 @@ multiclass avx512_packs_rm<bits<8> opc, string OpcodeStr, (_Src.VT _Src.RC:$src2))), IsCommutable, IsCommutable>, EVEX_CD8<_Src.EltSize, CD8VF>, EVEX, VVVV, Sched<[sched]>; + let mayLoad = 1, hasSideEffects = 0 in defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst), (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", @@ -5370,6 +5389,7 @@ multiclass avx512_fp_scalar<bits<8> opc, string 
OpcodeStr,X86VectorVTInfo _, (_.VT (VecNode _.RC:$src1, _.RC:$src2)), "_Int">, Sched<[sched]>; + let mayLoad = 1 in defm rm : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.IntScalarMemOp:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", @@ -5384,6 +5404,7 @@ multiclass avx512_fp_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _, Sched<[sched]> { let isCommutable = IsCommutable; } + let mayLoad = 1 in def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst), (ins _.FRC:$src1, _.ScalarMemOp:$src2), OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", @@ -5414,6 +5435,7 @@ multiclass avx512_fp_scalar_sae<bits<8> opc, string OpcodeStr,X86VectorVTInfo _, (_.VT (VecNode _.RC:$src1, _.RC:$src2)), "_Int">, Sched<[sched]>, SIMD_EXC; + let mayLoad = 1 in defm rm : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.IntScalarMemOp:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", @@ -5430,6 +5452,7 @@ multiclass avx512_fp_scalar_sae<bits<8> opc, string OpcodeStr,X86VectorVTInfo _, Sched<[sched]> { let isCommutable = IsCommutable; } + let mayLoad = 1 in def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst), (ins _.FRC:$src1, _.ScalarMemOp:$src2), OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", @@ -5509,6 +5532,7 @@ multiclass avx512_comutable_binop_s<bits<8> opc, string OpcodeStr, Sched<[sched]> { let isCommutable = 1; } + let mayLoad = 1 in def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst), (ins _.FRC:$src1, _.ScalarMemOp:$src2), OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", @@ -5737,6 +5761,7 @@ multiclass avx512_fp_scalef_p<bits<8> opc, string OpcodeStr, SDNode OpNode, "$src2, $src1", "$src1, $src2", (_.VT (OpNode _.RC:$src1, _.RC:$src2))>, EVEX, VVVV, Sched<[sched]>; + let mayLoad = 1 in { defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr#_.Suffix, "$src2, $src1", "$src1, $src2", @@ -5749,6 +5774,7 @@ multiclass avx512_fp_scalef_p<bits<8> opc, string OpcodeStr, SDNode OpNode, (OpNode _.RC:$src1, (_.VT (_.BroadcastLdFrag addr:$src2)))>, EVEX, VVVV, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; } + } } multiclass avx512_fp_scalef_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode, @@ -5759,6 +5785,7 @@ multiclass avx512_fp_scalef_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode, "$src2, $src1", "$src1, $src2", (_.VT (OpNode _.RC:$src1, _.RC:$src2))>, Sched<[sched]>; + let mayLoad = 1 in defm rm: AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.IntScalarMemOp:$src2), OpcodeStr#_.Suffix, "$src2, $src1", "$src1, $src2", @@ -5916,6 +5943,7 @@ multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM, "$src2, $src1", "$src1, $src2", (_.VT (OpNode _.RC:$src1, (i8 timm:$src2)))>, Sched<[sched]>; + let mayLoad = 1 in defm mi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst), (ins _.MemOp:$src1, u8imm:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", @@ -5928,7 +5956,7 @@ multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM, multiclass avx512_shift_rmbi<bits<8> opc, Format ImmFormM, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _> { - let ExeDomain = _.ExeDomain in + let ExeDomain = _.ExeDomain, mayLoad = 1 in defm mbi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst), (ins _.ScalarMemOp:$src1, u8imm:$src2), OpcodeStr, "$src2, ${src1}"#_.BroadcastStr, "${src1}"#_.BroadcastStr#", $src2", @@ -5946,6 +5974,7 @@ multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode 
OpNode, "$src2, $src1", "$src1, $src2", (_.VT (OpNode _.RC:$src1, (SrcVT VR128X:$src2)))>, AVX512BIBase, EVEX, VVVV, Sched<[sched]>; + let mayLoad = 1 in defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, i128mem:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", @@ -6095,6 +6124,7 @@ multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode, "$src2, $src1", "$src1, $src2", (_.VT (OpNode _.RC:$src1, (_.VT _.RC:$src2)))>, AVX5128IBase, EVEX, VVVV, Sched<[sched]>; + let mayLoad = 1 in defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", @@ -6107,7 +6137,7 @@ multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode, multiclass avx512_var_shift_mb<bits<8> opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _> { - let ExeDomain = _.ExeDomain in + let ExeDomain = _.ExeDomain, mayLoad = 1 in defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr, "${src2}"#_.BroadcastStr#", $src1", @@ -6372,6 +6402,7 @@ multiclass avx512_permil_vec<bits<8> OpcVar, string OpcodeStr, SDNode OpNode, (_.VT (OpNode _.RC:$src1, (Ctrl.VT Ctrl.RC:$src2)))>, T8, PD, EVEX, VVVV, Sched<[sched]>; + let mayLoad = 1 in { defm rm: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, Ctrl.MemOp:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", @@ -6389,6 +6420,7 @@ multiclass avx512_permil_vec<bits<8> OpcVar, string OpcodeStr, SDNode OpNode, (Ctrl.VT (Ctrl.BroadcastLdFrag addr:$src2))))>, T8, PD, EVEX, VVVV, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>, Sched<[sched.Folded, sched.ReadAfterFold]>; + } } multiclass avx512_permil_vec_common<string OpcodeStr, bits<8> OpcVar, @@ -7258,6 +7290,7 @@ let ExeDomain = DstVT.ExeDomain, Uses = _Uses, (OpNode (DstVT.VT DstVT.RC:$src1), SrcRC:$src2))]>, EVEX, VVVV, Sched<[sched, ReadDefault, ReadInt2Fpu]>; + let mayLoad = 1 in def rm_Int : SI<opc, MRMSrcMem, (outs DstVT.RC:$dst), (ins DstVT.RC:$src1, x86memop:$src2), asm#"{"#mem#"}\t{$src2, $src1, $dst|$dst, $src1, $src2}", @@ -7400,6 +7433,7 @@ multiclass avx512_cvt_s_int_round<bits<8> opc, X86VectorVTInfo SrcVT, [(set DstVT.RC:$dst, (OpNodeRnd (SrcVT.VT SrcVT.RC:$src),(i32 timm:$rc)))]>, EVEX, VEX_LIG, EVEX_B, EVEX_RC, Sched<[sched]>; + let mayLoad = 1 in def rm_Int : SI<opc, MRMSrcMem, (outs DstVT.RC:$dst), (ins SrcVT.IntScalarMemOp:$src), !strconcat(asm,"\t{$src, $dst|$dst, $src}"), [(set DstVT.RC:$dst, (OpNode @@ -7451,6 +7485,7 @@ multiclass avx512_cvt_s<bits<8> opc, string asm, X86VectorVTInfo SrcVT, !strconcat(asm,"\t{$src, $dst|$dst, $src}"), [(set DstVT.RC:$dst, (OpNode SrcVT.FRC:$src))]>, EVEX, VEX_LIG, Sched<[sched]>, SIMD_EXC; + let mayLoad = 1 in def rm : AVX512<opc, MRMSrcMem, (outs DstVT.RC:$dst), (ins SrcVT.ScalarMemOp:$src), !strconcat(asm,"\t{$src, $dst|$dst, $src}"), [(set DstVT.RC:$dst, (OpNode (SrcVT.ScalarLdFrag addr:$src)))]>, @@ -7572,6 +7607,7 @@ let Predicates = [prd], ExeDomain = _SrcRC.ExeDomain in { !strconcat(asm,"\t{$src, $dst|$dst, $src}"), [(set _DstRC.RC:$dst, (OpNode _SrcRC.FRC:$src))]>, EVEX, VEX_LIG, Sched<[sched]>, SIMD_EXC; + let mayLoad = 1 in def rm : AVX512<opc, MRMSrcMem, (outs _DstRC.RC:$dst), (ins _SrcRC.ScalarMemOp:$src), !strconcat(asm,"\t{$src, $dst|$dst, $src}"), [(set _DstRC.RC:$dst, (OpNode (_SrcRC.ScalarLdFrag addr:$src)))]>, @@ -7587,6 +7623,7 @@ let Predicates = [prd], ExeDomain = _SrcRC.ExeDomain in { !strconcat(asm,"\t{{sae}, $src, $dst|$dst, 
$src, {sae}}"), [(set _DstRC.RC:$dst, (OpNodeSAE (_SrcRC.VT _SrcRC.RC:$src)))]>, EVEX, VEX_LIG, EVEX_B, Sched<[sched]>; + let mayLoad = 1 in def rm_Int : AVX512<opc, MRMSrcMem, (outs _DstRC.RC:$dst), (ins _SrcRC.IntScalarMemOp:$src), !strconcat(asm,"\t{$src, $dst|$dst, $src}"), @@ -7644,6 +7681,7 @@ multiclass avx512_cvt_fp_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _ (_.VT (OpNode (_.VT _.RC:$src1), (_Src.VT _Src.RC:$src2))), "_Int">, EVEX, VVVV, VEX_LIG, Sched<[sched]>; + let mayLoad = 1 in defm rm : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _Src.IntScalarMemOp:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", @@ -7807,6 +7845,7 @@ let Uses = [MXCSR], mayRaiseFPException = 1 in { _.ImmAllZerosV)>, EVEX, Sched<[sched]>; + let mayLoad = 1 in { defm rm : AVX512_maskable_cvt<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins MemOp:$src), (ins _.RC:$src0, MaskRC:$mask, MemOp:$src), @@ -7840,6 +7879,7 @@ let Uses = [MXCSR], mayRaiseFPException = 1 in { _.ImmAllZerosV)>, EVEX, EVEX_B, Sched<[sched.Folded]>; } + } } // Conversion with SAE - suppress all exceptions multiclass avx512_vcvt_fp_sae<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, @@ -8944,6 +8984,7 @@ multiclass avx512_cvtph2ps<X86VectorVTInfo _dest, X86VectorVTInfo _src, (X86any_cvtph2ps (_src.VT _src.RC:$src)), (X86cvtph2ps (_src.VT _src.RC:$src))>, T8, PD, Sched<[sched]>; + let mayLoad = 1 in defm rm : AVX512_maskable_split<0x13, MRMSrcMem, _dest, (outs _dest.RC:$dst), (ins x86memop:$src), "vcvtph2ps", "$src", "$src", (X86any_cvtph2ps (_src.VT ld_dag)), @@ -9161,6 +9202,7 @@ multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, SDNode OpNode, "$src2, $src1", "$src1, $src2", (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))>, EVEX, VVVV, VEX_LIG, Sched<[sched]>; + let mayLoad = 1 in defm rm : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.IntScalarMemOp:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", @@ -9621,6 +9663,7 @@ multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr, (i32 timm:$src3))), "_Int">, EVEX_B, Sched<[sched]>; + let mayLoad = 1 in defm rmi : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.IntScalarMemOp:$src2, i32u8imm:$src3), OpcodeStr, @@ -9999,6 +10042,7 @@ multiclass avx512_pmovx_common<bits<8> opc, string OpcodeStr, X86FoldableSchedWr (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src)))>, EVEX, Sched<[sched]>; + let mayLoad = 1 in defm rm : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst), (ins x86memop:$src), OpcodeStr ,"$src", "$src", (DestInfo.VT (LdFrag addr:$src))>, @@ -10601,6 +10645,7 @@ multiclass expand_by_vec_width<bits<8> opc, X86VectorVTInfo _, (null_frag)>, AVX5128IBase, Sched<[sched]>; + let mayLoad = 1 in defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.MemOp:$src1), OpcodeStr, "$src1", "$src1", (null_frag)>, @@ -10673,6 +10718,7 @@ multiclass avx512_unary_fp_packed_imm<bits<8> opc, string OpcodeStr, (OpNode (_.VT _.RC:$src1), (i32 timm:$src2)), (MaskOpNode (_.VT _.RC:$src1), (i32 timm:$src2))>, Sched<[sched]>; + let mayLoad = 1 in { defm rmi : AVX512_maskable_split<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.MemOp:$src1, i32u8imm:$src2), OpcodeStr#_.Suffix, "$src2, $src1", "$src1, $src2", @@ -10691,6 +10737,7 @@ multiclass avx512_unary_fp_packed_imm<bits<8> opc, string OpcodeStr, (i32 timm:$src2))>, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; } + } } //handle instruction reg_vec1 = op(reg_vec2,reg_vec3,imm),{sae} @@ -10739,6 +10786,7 @@ 
multiclass avx512_fp_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNode, (_.VT _.RC:$src2), (i32 timm:$src3))>, Sched<[sched]>; + let mayLoad = 1 in { defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3), OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3", @@ -10755,6 +10803,7 @@ multiclass avx512_fp_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNode, (i32 timm:$src3))>, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; } + } } //handle instruction reg_vec1 = op(reg_vec2,reg_vec3,imm) @@ -10770,6 +10819,7 @@ multiclass avx512_3Op_rm_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode, (SrcInfo.VT SrcInfo.RC:$src2), (i8 timm:$src3)))>, Sched<[sched]>; + let mayLoad = 1 in defm rmi : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst), (ins SrcInfo.RC:$src1, SrcInfo.MemOp:$src2, u8imm:$src3), OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3", @@ -10788,7 +10838,7 @@ multiclass avx512_3Op_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _>: avx512_3Op_rm_imm8<opc, OpcodeStr, OpNode, sched, _, _>{ - let ExeDomain = _.ExeDomain, ImmT = Imm8 in + let ExeDomain = _.ExeDomain, ImmT = Imm8, mayLoad = 1 in defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3), OpcodeStr, "$src3, ${src2}"#_.BroadcastStr#", $src1", @@ -10811,6 +10861,7 @@ multiclass avx512_fp_scalar_imm<bits<8> opc, string OpcodeStr, SDNode OpNode, (_.VT _.RC:$src2), (i32 timm:$src3))>, Sched<[sched]>; + let mayLoad = 1 in defm rmi : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.IntScalarMemOp:$src2, i32u8imm:$src3), OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3", @@ -10979,6 +11030,7 @@ multiclass avx512_shuff_packed_128_common<bits<8> opc, string OpcodeStr, (CastInfo.VT (X86Shuf128 _.RC:$src1, _.RC:$src2, (i8 timm:$src3)))))>, Sched<[sched]>; + let mayLoad = 1 in { defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.MemOp:$src2, u8imm:$src3), OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3", @@ -11000,6 +11052,7 @@ multiclass avx512_shuff_packed_128_common<bits<8> opc, string OpcodeStr, (i8 timm:$src3)))))>, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; } + } } multiclass avx512_shuff_packed_128<string OpcodeStr, X86FoldableSchedWrite sched, @@ -11031,6 +11084,7 @@ multiclass avx512_valign<bits<8> opc, string OpcodeStr, OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3", (_.VT (X86VAlign _.RC:$src1, _.RC:$src2, (i8 timm:$src3)))>, Sched<[sched]>; + let mayLoad = 1 in { defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.MemOp:$src2, u8imm:$src3), OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3", @@ -11048,6 +11102,7 @@ multiclass avx512_valign<bits<8> opc, string OpcodeStr, (i8 timm:$src3))>, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; } + } } multiclass avx512_valign_common<string OpcodeStr, X86SchedWriteWidths sched, @@ -11202,6 +11257,7 @@ multiclass avx512_unary_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, (_.VT (OpNode (_.VT _.RC:$src1)))>, EVEX, AVX5128IBase, Sched<[sched]>; + let mayLoad = 1 in defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.MemOp:$src1), OpcodeStr, "$src1", "$src1", @@ -11214,6 +11270,7 @@ multiclass avx512_unary_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, multiclass avx512_unary_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode, 
X86FoldableSchedWrite sched, X86VectorVTInfo _> : avx512_unary_rm<opc, OpcodeStr, OpNode, sched, _> { + let mayLoad = 1 in defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.ScalarMemOp:$src1), OpcodeStr, "${src1}"#_.BroadcastStr, @@ -11368,6 +11425,7 @@ multiclass avx512_movddup_128<bits<8> opc, string OpcodeStr, (ins _.RC:$src), OpcodeStr, "$src", "$src", (_.VT (X86VBroadcast (_.VT _.RC:$src)))>, EVEX, Sched<[sched]>; + let mayLoad = 1 in defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.ScalarMemOp:$src), OpcodeStr, "$src", "$src", (_.VT (_.BroadcastLdFrag addr:$src))>, @@ -11513,6 +11571,7 @@ defm VPEXTRQZ : avx512_extract_elt_dq<"vpextrq", v2i64x_info, GR64>, REX_W; multiclass avx512_insert_elt_m<bits<8> opc, string OpcodeStr, SDNode OpNode, X86VectorVTInfo _, PatFrag LdFrag, SDPatternOperator immoperator> { + let mayLoad = 1 in def rmi : AVX512Ii8<opc, MRMSrcMem, (outs _.RC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3), OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", @@ -11650,6 +11709,7 @@ multiclass avx512_psadbw_packed<bits<8> opc, SDNode OpNode, (OpNode (_src.VT _src.RC:$src1), (_src.VT _src.RC:$src2))))]>, Sched<[sched]>; + let mayLoad = 1 in def rm : AVX512BI<opc, MRMSrcMem, (outs _dst.RC:$dst), (ins _src.RC:$src1, _src.MemOp:$src2), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), @@ -11751,6 +11811,7 @@ multiclass avx512_ternlog<bits<8> opc, string OpcodeStr, SDNode OpNode, (_.VT _.RC:$src3), (i8 timm:$src4)), 1, 1>, AVX512AIi8Base, EVEX, VVVV, Sched<[sched]>; + let mayLoad = 1 in { defm rmi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src2, _.MemOp:$src3, u8imm:$src4), OpcodeStr, "$src4, $src3, $src2", "$src2, $src3, $src4", @@ -11770,6 +11831,7 @@ multiclass avx512_ternlog<bits<8> opc, string OpcodeStr, SDNode OpNode, (i8 timm:$src4)), 1, 0>, EVEX_B, AVX512AIi8Base, EVEX, VVVV, EVEX_CD8<_.EltSize, CD8VF>, Sched<[sched.Folded, sched.ReadAfterFold]>; + } }// Constraints = "$src1 = $dst" // Additional patterns for matching passthru operand in other positions. 
@@ -12016,6 +12078,7 @@ multiclass avx512_fixupimm_packed<bits<8> opc, string OpcodeStr, (_.VT _.RC:$src2), (TblVT.VT _.RC:$src3), (i32 timm:$src4))>, Sched<[sched]>; + let mayLoad = 1 in { defm rmi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src2, _.MemOp:$src3, i32u8imm:$src4), OpcodeStr#_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4", @@ -12033,6 +12096,7 @@ multiclass avx512_fixupimm_packed<bits<8> opc, string OpcodeStr, (TblVT.VT (TblVT.BroadcastLdFrag addr:$src3)), (i32 timm:$src4))>, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; + } } // Constraints = "$src1 = $dst" } @@ -12075,6 +12139,7 @@ multiclass avx512_fixupimm_scalar<bits<8> opc, string OpcodeStr, (_src3VT.VT _src3VT.RC:$src3), (i32 timm:$src4))>, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; + let mayLoad = 1 in defm rmi : AVX512_maskable_3src_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src2, _.ScalarMemOp:$src3, i32u8imm:$src4), OpcodeStr#_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4", @@ -12417,6 +12482,7 @@ multiclass VNNI_rmb<bits<8> Op, string OpStr, SDNode OpNode, VTI.RC:$src2, VTI.RC:$src3)), IsCommutable, IsCommutable>, EVEX, VVVV, T8, Sched<[sched]>; + let mayLoad = 1 in { defm rm : AVX512_maskable_3src<Op, MRMSrcMem, VTI, (outs VTI.RC:$dst), (ins VTI.RC:$src2, VTI.MemOp:$src3), OpStr, "$src3, $src2", "$src2, $src3", @@ -12435,6 +12501,7 @@ multiclass VNNI_rmb<bits<8> Op, string OpStr, SDNode OpNode, T8, Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>; } + } } multiclass VNNI_common<bits<8> Op, string OpStr, SDNode OpNode, @@ -12508,6 +12575,7 @@ multiclass VPSHUFBITQMB_rm<X86FoldableSchedWrite sched, X86VectorVTInfo VTI> { (X86Vpshufbitqmb_su (VTI.VT VTI.RC:$src1), (VTI.VT VTI.RC:$src2))>, EVEX, VVVV, T8, PD, Sched<[sched]>; + let mayLoad = 1 in defm rm : AVX512_maskable_cmp<0x8F, MRMSrcMem, VTI, (outs VTI.KRC:$dst), (ins VTI.RC:$src1, VTI.MemOp:$src2), "vpshufbitqmb", @@ -12557,7 +12625,7 @@ multiclass GF2P8AFFINE_avx512_rmb_imm<bits<8> Op, string OpStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo VTI, X86VectorVTInfo BcstVTI> : avx512_3Op_rm_imm8<Op, OpStr, OpNode, sched, VTI, VTI> { - let ExeDomain = VTI.ExeDomain in + let ExeDomain = VTI.ExeDomain, mayLoad = 1 in defm rmbi : AVX512_maskable<Op, MRMSrcMem, VTI, (outs VTI.RC:$dst), (ins VTI.RC:$src1, BcstVTI.ScalarMemOp:$src2, u8imm:$src3), OpStr, "$src3, ${src2}"#BcstVTI.BroadcastStr#", $src1", @@ -12660,6 +12728,7 @@ multiclass avx512_vp2intersect_modes<X86FoldableSchedWrite sched, X86VectorVTInf _.RC:$src1, (_.VT _.RC:$src2)))]>, EVEX, VVVV, T8, XD, Sched<[sched]>; + let mayLoad = 1 in { def rm : I<0x68, MRMSrcMem, (outs _.KRPC:$dst), (ins _.RC:$src1, _.MemOp:$src2), @@ -12679,6 +12748,7 @@ multiclass avx512_vp2intersect_modes<X86FoldableSchedWrite sched, X86VectorVTInf _.RC:$src1, (_.VT (_.BroadcastLdFrag addr:$src2))))]>, EVEX, VVVV, T8, XD, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>, Sched<[sched.Folded, sched.ReadAfterFold]>; + } } multiclass avx512_vp2intersect<X86SchedWriteWidths sched, AVX512VLVectorVTInfo _> { @@ -12882,6 +12952,7 @@ let Predicates = [HasFP16] in { // Move word ( r/m16) to Packed word def VMOVW2SHrr : AVX512<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src), "vmovw\t{$src, $dst|$dst, $src}", []>, T_MAP5, PD, EVEX, Sched<[WriteVecMoveFromGpr]>; +let mayLoad = 1 in def VMOVWrm : AVX512<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i16mem:$src), "vmovw\t{$src, $dst|$dst, $src}", [(set VR128X:$dst, @@ -13607,6 +13678,7 @@ multiclass 
avx512_cfmbinop_sh_common<bits<8> opc, string OpcodeStr, SDNode OpNod (v4f32 (OpNode VR128X:$src1, VR128X:$src2)), IsCommutable, IsCommutable, IsCommutable, X86selects, "@earlyclobber $dst">, Sched<[WriteFMAX]>; + let mayLoad = 1 in defm rm : AVX512_maskable<opc, MRMSrcMem, f32x_info, (outs VR128X:$dst), (ins VR128X:$src1, ssmem:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", diff --git a/llvm/lib/TargetParser/TargetParser.cpp b/llvm/lib/TargetParser/TargetParser.cpp index 34b09b1..b906690 100644 --- a/llvm/lib/TargetParser/TargetParser.cpp +++ b/llvm/lib/TargetParser/TargetParser.cpp @@ -444,6 +444,7 @@ static void fillAMDGCNFeatureMap(StringRef GPU, const Triple &T, Features["atomic-fmin-fmax-global-f32"] = true; Features["atomic-fmin-fmax-global-f64"] = true; Features["wavefrontsize32"] = true; + Features["cluster"] = true; break; case GK_GFX1201: case GK_GFX1200: diff --git a/llvm/lib/Transforms/Utils/SCCPSolver.cpp b/llvm/lib/Transforms/Utils/SCCPSolver.cpp index af216cd..9693ae6 100644 --- a/llvm/lib/Transforms/Utils/SCCPSolver.cpp +++ b/llvm/lib/Transforms/Utils/SCCPSolver.cpp @@ -317,24 +317,29 @@ static Value *simplifyInstruction(SCCPSolver &Solver, // Early exit if we know nothing about X. if (LRange.isFullSet()) return nullptr; - // We are allowed to refine the comparison to either true or false for out - // of range inputs. Here we refine the comparison to true, i.e. we relax - // the range check. - auto NewCR = CR->exactUnionWith(LRange.inverse()); - // TODO: Check if we can narrow the range check to an equality test. - // E.g, for X in [0, 4), X - 3 u< 2 -> X == 3 - if (!NewCR) + auto ConvertCRToICmp = + [&](const std::optional<ConstantRange> &NewCR) -> Value * { + ICmpInst::Predicate Pred; + APInt RHS; + // Check if we can represent NewCR as an icmp predicate. + if (NewCR && NewCR->getEquivalentICmp(Pred, RHS)) { + IRBuilder<NoFolder> Builder(&Inst); + Value *NewICmp = + Builder.CreateICmp(Pred, X, ConstantInt::get(X->getType(), RHS)); + InsertedValues.insert(NewICmp); + return NewICmp; + } return nullptr; - ICmpInst::Predicate Pred; - APInt RHS; - // Check if we can represent NewCR as an icmp predicate. - if (NewCR->getEquivalentICmp(Pred, RHS)) { - IRBuilder<NoFolder> Builder(&Inst); - Value *NewICmp = - Builder.CreateICmp(Pred, X, ConstantInt::get(X->getType(), RHS)); - InsertedValues.insert(NewICmp); - return NewICmp; - } + }; + // We are allowed to refine the comparison to either true or false for out + // of range inputs. + // Here we refine the comparison to false, and check if we can narrow the + // range check to a simpler test. + if (auto *V = ConvertCRToICmp(CR->exactIntersectWith(LRange))) + return V; + // Here we refine the comparison to true, i.e. we relax the range check. 
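Both refinements (narrowing via exactIntersectWith and relaxing via exactUnionWith of the inverted range) can be sketched with the constants from the original TODO example, X in [0, 4) and the check "X - 3 u< 2". The sketch below is illustrative only: it uses plain 8-bit arithmetic rather than the LLVM ConstantRange API, and the 8-bit width is an assumption.

#include <cassert>
#include <cstdint>

// For X known to lie in [0, 4), the original check "(X - 3) u< 2" holds for
// X in [3, 5).  Intersecting with the known range narrows it to the equality
// test X == 3; unioning with everything outside [0, 4) relaxes it to X u>= 3.
// Both forms agree with the original check for every reachable X.
int main() {
  for (uint8_t X = 0; X < 4; ++X) {
    bool Original = static_cast<uint8_t>(X - 3) < 2; // (X - 3) u< 2
    assert(Original == (X == 3));                    // narrowed range check
    assert(Original == (X >= 3));                    // relaxed range check
  }
  return 0;
}
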
+ if (auto *V = ConvertCRToICmp(CR->exactUnionWith(LRange.inverse()))) + return V; } } diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp index 48055ad..148bfa8 100644 --- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp @@ -5734,15 +5734,66 @@ bool SimplifyCFGOpt::simplifyUnreachable(UnreachableInst *UI) { return Changed; } -static bool casesAreContiguous(SmallVectorImpl<ConstantInt *> &Cases) { +struct ContiguousCasesResult { + ConstantInt *Min; + ConstantInt *Max; + BasicBlock *Dest; + BasicBlock *OtherDest; + SmallVectorImpl<ConstantInt *> *Cases; + SmallVectorImpl<ConstantInt *> *OtherCases; +}; + +static std::optional<ContiguousCasesResult> +findContiguousCases(Value *Condition, SmallVectorImpl<ConstantInt *> &Cases, + SmallVectorImpl<ConstantInt *> &OtherCases, + BasicBlock *Dest, BasicBlock *OtherDest) { assert(Cases.size() >= 1); array_pod_sort(Cases.begin(), Cases.end(), constantIntSortPredicate); - for (size_t I = 1, E = Cases.size(); I != E; ++I) { - if (Cases[I - 1]->getValue() != Cases[I]->getValue() + 1) - return false; + const APInt &Min = Cases.back()->getValue(); + const APInt &Max = Cases.front()->getValue(); + APInt Offset = Max - Min; + size_t ContiguousOffset = Cases.size() - 1; + if (Offset == ContiguousOffset) { + return ContiguousCasesResult{ + /*Min=*/Cases.back(), + /*Max=*/Cases.front(), + /*Dest=*/Dest, + /*OtherDest=*/OtherDest, + /*Cases=*/&Cases, + /*OtherCases=*/&OtherCases, + }; } - return true; + ConstantRange CR = computeConstantRange(Condition, /*ForSigned=*/false); + // If this is a wrapping contiguous range, that is, [Min, OtherMin] + + // [OtherMax, Max] (also [OtherMax, OtherMin]), [OtherMin+1, OtherMax-1] is a + // contiguous range for the other destination. N.B. If CR is not a full range, + // Max+1 is not equal to Min. It's not continuous in arithmetic. + if (Max == CR.getUnsignedMax() && Min == CR.getUnsignedMin()) { + assert(Cases.size() >= 2); + auto *It = + std::adjacent_find(Cases.begin(), Cases.end(), [](auto L, auto R) { + return L->getValue() != R->getValue() + 1; + }); + if (It == Cases.end()) + return std::nullopt; + auto [OtherMax, OtherMin] = std::make_pair(*It, *std::next(It)); + if ((Max - OtherMax->getValue()) + (OtherMin->getValue() - Min) == + Cases.size() - 2) { + return ContiguousCasesResult{ + /*Min=*/cast<ConstantInt>( + ConstantInt::get(OtherMin->getType(), OtherMin->getValue() + 1)), + /*Max=*/ + cast<ConstantInt>( + ConstantInt::get(OtherMax->getType(), OtherMax->getValue() - 1)), + /*Dest=*/OtherDest, + /*OtherDest=*/Dest, + /*Cases=*/&OtherCases, + /*OtherCases=*/&Cases, + }; + } + } + return std::nullopt; } static void createUnreachableSwitchDefault(SwitchInst *Switch, @@ -5779,7 +5830,6 @@ bool SimplifyCFGOpt::turnSwitchRangeIntoICmp(SwitchInst *SI, bool HasDefault = !SI->defaultDestUnreachable(); auto *BB = SI->getParent(); - // Partition the cases into two sets with different destinations. BasicBlock *DestA = HasDefault ? SI->getDefaultDest() : nullptr; BasicBlock *DestB = nullptr; @@ -5813,37 +5863,62 @@ bool SimplifyCFGOpt::turnSwitchRangeIntoICmp(SwitchInst *SI, assert(!CasesA.empty() || HasDefault); // Figure out if one of the sets of cases form a contiguous range. 
- SmallVectorImpl<ConstantInt *> *ContiguousCases = nullptr; - BasicBlock *ContiguousDest = nullptr; - BasicBlock *OtherDest = nullptr; - if (!CasesA.empty() && casesAreContiguous(CasesA)) { - ContiguousCases = &CasesA; - ContiguousDest = DestA; - OtherDest = DestB; - } else if (casesAreContiguous(CasesB)) { - ContiguousCases = &CasesB; - ContiguousDest = DestB; - OtherDest = DestA; - } else - return false; + std::optional<ContiguousCasesResult> ContiguousCases; + + // Only one icmp is needed when there is only one case. + if (!HasDefault && CasesA.size() == 1) + ContiguousCases = ContiguousCasesResult{ + /*Min=*/CasesA[0], + /*Max=*/CasesA[0], + /*Dest=*/DestA, + /*OtherDest=*/DestB, + /*Cases=*/&CasesA, + /*OtherCases=*/&CasesB, + }; + else if (CasesB.size() == 1) + ContiguousCases = ContiguousCasesResult{ + /*Min=*/CasesB[0], + /*Max=*/CasesB[0], + /*Dest=*/DestB, + /*OtherDest=*/DestA, + /*Cases=*/&CasesB, + /*OtherCases=*/&CasesA, + }; + // Correctness: Cases to the default destination cannot be contiguous cases. + else if (!HasDefault) + ContiguousCases = + findContiguousCases(SI->getCondition(), CasesA, CasesB, DestA, DestB); - // Start building the compare and branch. + if (!ContiguousCases) + ContiguousCases = + findContiguousCases(SI->getCondition(), CasesB, CasesA, DestB, DestA); - Constant *Offset = ConstantExpr::getNeg(ContiguousCases->back()); - Constant *NumCases = - ConstantInt::get(Offset->getType(), ContiguousCases->size()); + if (!ContiguousCases) + return false; - Value *Sub = SI->getCondition(); - if (!Offset->isNullValue()) - Sub = Builder.CreateAdd(Sub, Offset, Sub->getName() + ".off"); + auto [Min, Max, Dest, OtherDest, Cases, OtherCases] = *ContiguousCases; - Value *Cmp; + // Start building the compare and branch. + + Constant *Offset = ConstantExpr::getNeg(Min); + Constant *NumCases = ConstantInt::get(Offset->getType(), + Max->getValue() - Min->getValue() + 1); + BranchInst *NewBI; + if (NumCases->isOneValue()) { + assert(Max->getValue() == Min->getValue()); + Value *Cmp = Builder.CreateICmpEQ(SI->getCondition(), Min); + NewBI = Builder.CreateCondBr(Cmp, Dest, OtherDest); + } // If NumCases overflowed, then all possible values jump to the successor. - if (NumCases->isNullValue() && !ContiguousCases->empty()) - Cmp = ConstantInt::getTrue(SI->getContext()); - else - Cmp = Builder.CreateICmpULT(Sub, NumCases, "switch"); - BranchInst *NewBI = Builder.CreateCondBr(Cmp, ContiguousDest, OtherDest); + else if (NumCases->isNullValue() && !Cases->empty()) { + NewBI = Builder.CreateBr(Dest); + } else { + Value *Sub = SI->getCondition(); + if (!Offset->isNullValue()) + Sub = Builder.CreateAdd(Sub, Offset, Sub->getName() + ".off"); + Value *Cmp = Builder.CreateICmpULT(Sub, NumCases, "switch"); + NewBI = Builder.CreateCondBr(Cmp, Dest, OtherDest); + } // Update weight for the newly-created conditional branch. if (hasBranchWeightMD(*SI)) { @@ -5853,7 +5928,7 @@ bool SimplifyCFGOpt::turnSwitchRangeIntoICmp(SwitchInst *SI, uint64_t TrueWeight = 0; uint64_t FalseWeight = 0; for (size_t I = 0, E = Weights.size(); I != E; ++I) { - if (SI->getSuccessor(I) == ContiguousDest) + if (SI->getSuccessor(I) == Dest) TrueWeight += Weights[I]; else FalseWeight += Weights[I]; @@ -5868,15 +5943,15 @@ bool SimplifyCFGOpt::turnSwitchRangeIntoICmp(SwitchInst *SI, } // Prune obsolete incoming values off the successors' PHI nodes. 
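The single unsigned compare emitted above for a contiguous case set, and the complementary "hole" used for a wrapping set, can be sketched in plain C++ as follows. The helper name inContiguousRange and the concrete case constants are made up for illustration; Cond, Min, Max and NumCases mirror the variables in the code above, and 8-bit case values are assumed.

#include <cassert>
#include <cstdint>

// A contiguous case set [Min, Max] is selected with a single unsigned
// compare: (Cond - Min) u< NumCases, where NumCases = Max - Min + 1.
static bool inContiguousRange(uint8_t Cond, uint8_t Min, uint8_t Max) {
  unsigned NumCases = static_cast<uint8_t>(Max - Min) + 1u;
  return static_cast<uint8_t>(Cond - Min) < NumCases;
}

int main() {
  for (unsigned V = 0; V < 256; ++V) {
    // Cases {10, 11, 12, 13} branch to Dest, everything else to OtherDest.
    assert(inContiguousRange(V, 10, 13) == (V >= 10 && V <= 13));
    // Wrapping cases {0..2} and {250..255} leave the contiguous hole {3..249}
    // for the other destination, so the same test is reused with Dest and
    // OtherDest swapped.
    assert(!inContiguousRange(V, 3, 249) == (V <= 2 || V >= 250));
  }
  return 0;
}
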
- for (auto BBI = ContiguousDest->begin(); isa<PHINode>(BBI); ++BBI) { - unsigned PreviousEdges = ContiguousCases->size(); - if (ContiguousDest == SI->getDefaultDest()) + for (auto BBI = Dest->begin(); isa<PHINode>(BBI); ++BBI) { + unsigned PreviousEdges = Cases->size(); + if (Dest == SI->getDefaultDest()) ++PreviousEdges; for (unsigned I = 0, E = PreviousEdges - 1; I != E; ++I) cast<PHINode>(BBI)->removeIncomingValue(SI->getParent()); } for (auto BBI = OtherDest->begin(); isa<PHINode>(BBI); ++BBI) { - unsigned PreviousEdges = SI->getNumCases() - ContiguousCases->size(); + unsigned PreviousEdges = OtherCases->size(); if (OtherDest == SI->getDefaultDest()) ++PreviousEdges; for (unsigned I = 0, E = PreviousEdges - 1; I != E; ++I) diff --git a/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir b/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir deleted file mode 100644 index 0298168..0000000 --- a/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir +++ /dev/null @@ -1,1009 +0,0 @@ -# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 -# RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-zpr-predicate-spills -run-pass=greedy %s -o - | FileCheck %s -# RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-zpr-predicate-spills -start-before=greedy -stop-after=aarch64-expand-pseudo -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=EXPAND ---- | - source_filename = "<stdin>" - target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" - target triple = "aarch64--linux-gnu" - - define aarch64_sve_vector_pcs void @zpr_predicate_spill() #0 { entry: unreachable } - - define aarch64_sve_vector_pcs void @zpr_predicate_spill__save_restore_nzcv() #0 { entry: unreachable } - - define aarch64_sve_vector_pcs void @zpr_predicate_spill__save_restore_nzcv__scavenge_csr_gpr() #0 { entry: unreachable } - - define aarch64_sve_vector_pcs void @zpr_predicate_spill__spill_zpr() #0 { entry: unreachable } - - define aarch64_sve_vector_pcs void @zpr_predicate_spill_above_p7() #0 { entry: unreachable } - - define aarch64_sve_vector_pcs void @zpr_predicate_spill_p4_saved() #0 { entry: unreachable } - - attributes #0 = {nounwind "target-features"="+sme,+sve" "aarch64_pstate_sm_compatible"} -... 
---- -name: zpr_predicate_spill -tracksRegLiveness: true -stack: -liveins: - - { reg: '$p0' } -body: | - bb.0.entry: - liveins: $p0 - - ; CHECK-LABEL: name: zpr_predicate_spill - ; CHECK: stack: - ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 16, alignment: 16, - ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: - ; CHECK: liveins: $p0 - ; CHECK-NEXT: {{ $}} - ; - ; CHECK-NEXT: SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p0, %stack.0, 0 :: (store (s128) into %stack.0) - ; - ; CHECK-NEXT: $p0 = IMPLICIT_DEF - ; CHECK-NEXT: $p1 = IMPLICIT_DEF - ; CHECK-NEXT: $p2 = IMPLICIT_DEF - ; CHECK-NEXT: $p3 = IMPLICIT_DEF - ; CHECK-NEXT: $p4 = IMPLICIT_DEF - ; CHECK-NEXT: $p5 = IMPLICIT_DEF - ; CHECK-NEXT: $p6 = IMPLICIT_DEF - ; CHECK-NEXT: $p7 = IMPLICIT_DEF - ; CHECK-NEXT: $p8 = IMPLICIT_DEF - ; CHECK-NEXT: $p9 = IMPLICIT_DEF - ; CHECK-NEXT: $p10 = IMPLICIT_DEF - ; CHECK-NEXT: $p11 = IMPLICIT_DEF - ; CHECK-NEXT: $p12 = IMPLICIT_DEF - ; CHECK-NEXT: $p13 = IMPLICIT_DEF - ; CHECK-NEXT: $p14 = IMPLICIT_DEF - ; CHECK-NEXT: $p15 = IMPLICIT_DEF - ; - ; CHECK-NEXT: $p0 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0 :: (load (s128) from %stack.0) - ; - ; CHECK-NEXT: RET_ReallyLR implicit $p0 - - ; EXPAND-LABEL: name: zpr_predicate_spill - ; EXPAND: liveins: $p0, $fp, $p15, $p14, $p13, $p12, $p11, $p10, $p9, $p8, $p7, $p6, $p5, $p4 - ; EXPAND-NEXT: {{ $}} - ; - ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0 - ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.14) - ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -12, implicit $vg - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p15, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.13) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p14, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.12) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p13, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 2 :: (store (s128) into %stack.11) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p12, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 3 :: (store (s128) into %stack.10) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p11, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 4 :: (store (s128) into %stack.9) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p10, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 5 :: (store (s128) into %stack.8) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p9, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 6 :: (store (s128) into %stack.7) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 7 :: (store (s128) into %stack.6) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p7, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 8 :: (store (s128) into %stack.5) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p6, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 9 :: (store (s128) into %stack.4) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p5, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 10 :: (store (s128) into %stack.3) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 11 :: (store (s128) into %stack.2) - ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0 - ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg - ; - ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p0, 1, 0 - ; EXPAND-NEXT: $x8 = ADDXri $sp, 1024, 0 - ; 
EXPAND-NEXT: STR_ZXI $z0, $x8, 0 :: (store (s128) into %stack.0) - ; - ; EXPAND-NEXT: $p0 = IMPLICIT_DEF - ; EXPAND-NEXT: $p1 = IMPLICIT_DEF - ; EXPAND-NEXT: $p2 = IMPLICIT_DEF - ; EXPAND-NEXT: $p3 = IMPLICIT_DEF - ; EXPAND-NEXT: $p4 = IMPLICIT_DEF - ; EXPAND-NEXT: $p5 = IMPLICIT_DEF - ; EXPAND-NEXT: $p6 = IMPLICIT_DEF - ; EXPAND-NEXT: $p7 = IMPLICIT_DEF - ; EXPAND-NEXT: $p8 = IMPLICIT_DEF - ; EXPAND-NEXT: $p9 = IMPLICIT_DEF - ; EXPAND-NEXT: $p10 = IMPLICIT_DEF - ; EXPAND-NEXT: $p11 = IMPLICIT_DEF - ; EXPAND-NEXT: $p12 = IMPLICIT_DEF - ; EXPAND-NEXT: $p13 = IMPLICIT_DEF - ; EXPAND-NEXT: $p14 = IMPLICIT_DEF - ; EXPAND-NEXT: $p15 = IMPLICIT_DEF - ; - ; EXPAND-NEXT: $z0 = LDR_ZXI killed $x8, 0 :: (load (s128) from %stack.0) - ; EXPAND-NEXT: $p1 = frame-destroy PTRUE_B 31, implicit $vg - ; EXPAND-NEXT: $p0 = CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; - ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0 - ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.13) - ; EXPAND-NEXT: $p15 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.12) - ; EXPAND-NEXT: $p14 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.11) - ; EXPAND-NEXT: $p13 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.10) - ; EXPAND-NEXT: $p12 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 4 :: (load (s128) from %stack.9) - ; EXPAND-NEXT: $p11 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 5 :: (load (s128) from %stack.8) - ; EXPAND-NEXT: $p10 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 6 :: (load (s128) from %stack.7) - ; EXPAND-NEXT: $p9 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 7 :: (load (s128) from %stack.6) - ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 8 :: (load (s128) from %stack.5) - ; EXPAND-NEXT: $p7 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 9 :: (load (s128) from %stack.4) - ; EXPAND-NEXT: $p6 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 10 :: (load (s128) from %stack.3) - ; EXPAND-NEXT: $p5 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 11 :: (load (s128) from %stack.2) - ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 12, implicit $vg - ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.14) - ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0 - ; EXPAND-NEXT: RET undef $lr, implicit $p0 - %1:ppr = COPY $p0 - - $p0 = IMPLICIT_DEF - 
$p1 = IMPLICIT_DEF - $p2 = IMPLICIT_DEF - $p3 = IMPLICIT_DEF - $p4 = IMPLICIT_DEF - $p5 = IMPLICIT_DEF - $p6 = IMPLICIT_DEF - $p7 = IMPLICIT_DEF - $p8 = IMPLICIT_DEF - $p9 = IMPLICIT_DEF - $p10 = IMPLICIT_DEF - $p11 = IMPLICIT_DEF - $p12 = IMPLICIT_DEF - $p13 = IMPLICIT_DEF - $p14 = IMPLICIT_DEF - $p15 = IMPLICIT_DEF - - $p0 = COPY %1 - - RET_ReallyLR implicit $p0 -... ---- -name: zpr_predicate_spill__save_restore_nzcv -tracksRegLiveness: true -stack: -liveins: - - { reg: '$p0' } -body: | - bb.0.entry: - liveins: $p0 - - ; CHECK-LABEL: name: zpr_predicate_spill__save_restore_nzcv - ; CHECK: stack: - ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 16, alignment: 16, - ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: - ; CHECK: liveins: $p0 - ; CHECK-NEXT: {{ $}} - ; - ; CHECK-NEXT: $nzcv = IMPLICIT_DEF - ; - ; CHECK-NEXT: SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p0, %stack.0, 0 :: (store (s128) into %stack.0) - ; - ; CHECK-NEXT: $p0 = IMPLICIT_DEF - ; CHECK-NEXT: $p1 = IMPLICIT_DEF - ; CHECK-NEXT: $p2 = IMPLICIT_DEF - ; CHECK-NEXT: $p3 = IMPLICIT_DEF - ; CHECK-NEXT: $p4 = IMPLICIT_DEF - ; CHECK-NEXT: $p5 = IMPLICIT_DEF - ; CHECK-NEXT: $p6 = IMPLICIT_DEF - ; CHECK-NEXT: $p7 = IMPLICIT_DEF - ; CHECK-NEXT: $p8 = IMPLICIT_DEF - ; CHECK-NEXT: $p9 = IMPLICIT_DEF - ; CHECK-NEXT: $p10 = IMPLICIT_DEF - ; CHECK-NEXT: $p11 = IMPLICIT_DEF - ; CHECK-NEXT: $p12 = IMPLICIT_DEF - ; CHECK-NEXT: $p13 = IMPLICIT_DEF - ; CHECK-NEXT: $p14 = IMPLICIT_DEF - ; CHECK-NEXT: $p15 = IMPLICIT_DEF - ; - ; CHECK-NEXT: $p0 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0 :: (load (s128) from %stack.0) - ; - ; CHECK-NEXT: FAKE_USE implicit $nzcv - ; - ; CHECK-NEXT: RET_ReallyLR implicit $p0 - - ; EXPAND-LABEL: name: zpr_predicate_spill__save_restore_nzcv - ; EXPAND: liveins: $p0, $fp, $p15, $p14, $p13, $p12, $p11, $p10, $p9, $p8, $p7, $p6, $p5, $p4 - ; EXPAND-NEXT: {{ $}} - ; - ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0 - ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.14) - ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -12, implicit $vg - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p15, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.13) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p14, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.12) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p13, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 2 :: (store (s128) into %stack.11) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p12, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 3 :: (store (s128) into %stack.10) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p11, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 4 :: (store (s128) into %stack.9) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p10, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 5 :: (store (s128) into %stack.8) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p9, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 6 :: (store (s128) into %stack.7) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 7 :: (store (s128) into %stack.6) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p7, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 8 :: (store (s128) into %stack.5) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p6, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 9 :: (store (s128) into %stack.4) - 
; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p5, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 10 :: (store (s128) into %stack.3) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 11 :: (store (s128) into %stack.2) - ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0 - ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg - ; - ; EXPAND-NEXT: $nzcv = IMPLICIT_DEF - ; - ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p0, 1, 0 - ; EXPAND-NEXT: $x8 = ADDXri $sp, 1024, 0 - ; EXPAND-NEXT: STR_ZXI $z0, $x8, 0 :: (store (s128) into %stack.0) - ; - ; EXPAND-NEXT: $p0 = IMPLICIT_DEF - ; EXPAND-NEXT: $p1 = IMPLICIT_DEF - ; EXPAND-NEXT: $p2 = IMPLICIT_DEF - ; EXPAND-NEXT: $p3 = IMPLICIT_DEF - ; EXPAND-NEXT: $p4 = IMPLICIT_DEF - ; EXPAND-NEXT: $p5 = IMPLICIT_DEF - ; EXPAND-NEXT: $p6 = IMPLICIT_DEF - ; EXPAND-NEXT: $p7 = IMPLICIT_DEF - ; EXPAND-NEXT: $p8 = IMPLICIT_DEF - ; EXPAND-NEXT: $p9 = IMPLICIT_DEF - ; EXPAND-NEXT: $p10 = IMPLICIT_DEF - ; EXPAND-NEXT: $p11 = IMPLICIT_DEF - ; EXPAND-NEXT: $p12 = IMPLICIT_DEF - ; EXPAND-NEXT: $p13 = IMPLICIT_DEF - ; EXPAND-NEXT: $p14 = IMPLICIT_DEF - ; EXPAND-NEXT: $p15 = IMPLICIT_DEF - ; - ; EXPAND-NEXT: $z0 = LDR_ZXI killed $x8, 0 :: (load (s128) from %stack.0) - ; EXPAND-NEXT: $fp = MRS 55824, implicit-def $nzcv, implicit $nzcv - ; EXPAND-NEXT: $p0 = PTRUE_B 31, implicit $vg - ; EXPAND-NEXT: $p0 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: MSR 55824, $fp, implicit-def $nzcv - ; - ; EXPAND-NEXT: FAKE_USE implicit $nzcv - ; - ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0 - ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.13) - ; EXPAND-NEXT: $p1 = frame-destroy PTRUE_B 31, implicit $vg - ; EXPAND-NEXT: $p15 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.12) - ; EXPAND-NEXT: $p14 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.11) - ; EXPAND-NEXT: $p13 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.10) - ; EXPAND-NEXT: $p12 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 4 :: (load (s128) from %stack.9) - ; EXPAND-NEXT: $p11 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 5 :: (load (s128) from %stack.8) - ; EXPAND-NEXT: $p10 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 6 :: (load (s128) from %stack.7) - ; EXPAND-NEXT: $p9 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 7 :: (load (s128) from %stack.6) - ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 8 :: (load (s128) from %stack.5) - ; EXPAND-NEXT: $p7 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 9 :: (load (s128) from %stack.4) - ; 
EXPAND-NEXT: $p6 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 10 :: (load (s128) from %stack.3) - ; EXPAND-NEXT: $p5 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 11 :: (load (s128) from %stack.2) - ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 12, implicit $vg - ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.14) - ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0 - ; EXPAND-NEXT: RET undef $lr, implicit $p0 - $nzcv = IMPLICIT_DEF - - %1:ppr = COPY $p0 - - $p0 = IMPLICIT_DEF - $p1 = IMPLICIT_DEF - $p2 = IMPLICIT_DEF - $p3 = IMPLICIT_DEF - $p4 = IMPLICIT_DEF - $p5 = IMPLICIT_DEF - $p6 = IMPLICIT_DEF - $p7 = IMPLICIT_DEF - $p8 = IMPLICIT_DEF - $p9 = IMPLICIT_DEF - $p10 = IMPLICIT_DEF - $p11 = IMPLICIT_DEF - $p12 = IMPLICIT_DEF - $p13 = IMPLICIT_DEF - $p14 = IMPLICIT_DEF - $p15 = IMPLICIT_DEF - - $p0 = COPY %1 - - FAKE_USE implicit $nzcv - - RET_ReallyLR implicit $p0 -... ---- -name: zpr_predicate_spill__save_restore_nzcv__scavenge_csr_gpr -tracksRegLiveness: true -stack: -liveins: - - { reg: '$p0' } - - { reg: '$x0' } - - { reg: '$x1' } - - { reg: '$x2' } - - { reg: '$x3' } - - { reg: '$x4' } - - { reg: '$x5' } - - { reg: '$x6' } - - { reg: '$x7' } -body: | - bb.0.entry: - liveins: $p0, $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7 - - ; CHECK-LABEL: name: zpr_predicate_spill__save_restore_nzcv__scavenge_csr_gpr - ; CHECK: stack: - ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 16, alignment: 16, - ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: - ; CHECK: liveins: $p0, $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7 - ; CHECK-NEXT: {{ $}} - ; - ; CHECK-NEXT: $nzcv = IMPLICIT_DEF - ; - ; CHECK-NEXT: $x8 = IMPLICIT_DEF - ; CHECK-NEXT: $x9 = IMPLICIT_DEF - ; CHECK-NEXT: $x10 = IMPLICIT_DEF - ; CHECK-NEXT: $x11 = IMPLICIT_DEF - ; CHECK-NEXT: $x12 = IMPLICIT_DEF - ; CHECK-NEXT: $x13 = IMPLICIT_DEF - ; CHECK-NEXT: $x14 = IMPLICIT_DEF - ; CHECK-NEXT: $x15 = IMPLICIT_DEF - ; CHECK-NEXT: $x16 = IMPLICIT_DEF - ; CHECK-NEXT: $x17 = IMPLICIT_DEF - ; CHECK-NEXT: $x18 = IMPLICIT_DEF - ; - ; CHECK-NEXT: SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p0, %stack.0, 0 :: (store (s128) into %stack.0) - ; - ; CHECK-NEXT: $p0 = IMPLICIT_DEF - ; CHECK-NEXT: $p1 = IMPLICIT_DEF - ; CHECK-NEXT: $p2 = IMPLICIT_DEF - ; CHECK-NEXT: $p3 = IMPLICIT_DEF - ; CHECK-NEXT: $p4 = IMPLICIT_DEF - ; CHECK-NEXT: $p5 = IMPLICIT_DEF - ; CHECK-NEXT: $p6 = IMPLICIT_DEF - ; CHECK-NEXT: $p7 = IMPLICIT_DEF - ; CHECK-NEXT: $p8 = IMPLICIT_DEF - ; CHECK-NEXT: $p9 = IMPLICIT_DEF - ; CHECK-NEXT: $p10 = IMPLICIT_DEF - ; CHECK-NEXT: $p11 = IMPLICIT_DEF - ; CHECK-NEXT: $p12 = IMPLICIT_DEF - ; CHECK-NEXT: $p13 = IMPLICIT_DEF - ; CHECK-NEXT: $p14 = IMPLICIT_DEF - ; CHECK-NEXT: $p15 = IMPLICIT_DEF - ; - ; CHECK-NEXT: $p0 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0 :: (load (s128) from %stack.0) - ; - ; CHECK-NEXT: FAKE_USE implicit $nzcv, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18 - ; - ; CHECK-NEXT: RET_ReallyLR implicit $p0, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit $x4, implicit $x5, implicit $x6, implicit $x7, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit 
$x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18 - - ; EXPAND-LABEL: name: zpr_predicate_spill__save_restore_nzcv__scavenge_csr_gpr - ; EXPAND: liveins: $p0, $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7, $fp, $p15, $p14, $p13, $p12, $p11, $p10, $p9, $p8, $p7, $p6, $p5, $p4 - ; EXPAND-NEXT: {{ $}} - ; - ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0 - ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.14) - ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -12, implicit $vg - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p15, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.13) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p14, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.12) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p13, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 2 :: (store (s128) into %stack.11) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p12, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 3 :: (store (s128) into %stack.10) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p11, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 4 :: (store (s128) into %stack.9) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p10, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 5 :: (store (s128) into %stack.8) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p9, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 6 :: (store (s128) into %stack.7) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 7 :: (store (s128) into %stack.6) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p7, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 8 :: (store (s128) into %stack.5) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p6, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 9 :: (store (s128) into %stack.4) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p5, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 10 :: (store (s128) into %stack.3) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 11 :: (store (s128) into %stack.2) - ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0 - ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg - ; - ; EXPAND-NEXT: $nzcv = IMPLICIT_DEF - ; - ; EXPAND-NEXT: $x8 = IMPLICIT_DEF - ; EXPAND-NEXT: $x9 = IMPLICIT_DEF - ; EXPAND-NEXT: $x10 = IMPLICIT_DEF - ; EXPAND-NEXT: $x11 = IMPLICIT_DEF - ; EXPAND-NEXT: $x12 = IMPLICIT_DEF - ; EXPAND-NEXT: $x13 = IMPLICIT_DEF - ; EXPAND-NEXT: $x14 = IMPLICIT_DEF - ; EXPAND-NEXT: $x15 = IMPLICIT_DEF - ; EXPAND-NEXT: $x16 = IMPLICIT_DEF - ; EXPAND-NEXT: $x17 = IMPLICIT_DEF - ; EXPAND-NEXT: $x18 = IMPLICIT_DEF - ; - ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p0, 1, 0 - ; EXPAND-NEXT: $fp = ADDXri $sp, 1024, 0 - ; EXPAND-NEXT: STR_ZXI $z0, $fp, 0 :: (store (s128) into %stack.0) - ; - ; EXPAND-NEXT: $p0 = IMPLICIT_DEF - ; EXPAND-NEXT: $p1 = IMPLICIT_DEF - ; EXPAND-NEXT: $p2 = IMPLICIT_DEF - ; EXPAND-NEXT: $p3 = IMPLICIT_DEF - ; EXPAND-NEXT: $p4 = IMPLICIT_DEF - ; EXPAND-NEXT: $p5 = IMPLICIT_DEF - ; EXPAND-NEXT: $p6 = IMPLICIT_DEF - ; EXPAND-NEXT: $p7 = IMPLICIT_DEF - ; EXPAND-NEXT: $p8 = IMPLICIT_DEF - ; EXPAND-NEXT: $p9 = IMPLICIT_DEF - ; EXPAND-NEXT: $p10 = IMPLICIT_DEF - ; EXPAND-NEXT: $p11 = IMPLICIT_DEF - ; EXPAND-NEXT: $p12 = IMPLICIT_DEF - ; EXPAND-NEXT: $p13 = IMPLICIT_DEF - ; EXPAND-NEXT: $p14 = 
IMPLICIT_DEF - ; EXPAND-NEXT: $p15 = IMPLICIT_DEF - ; - ; EXPAND-NEXT: $z0 = LDR_ZXI killed $fp, 0 :: (load (s128) from %stack.0) - ; EXPAND-NEXT: $fp = MRS 55824, implicit-def $nzcv, implicit $nzcv - ; EXPAND-NEXT: $p0 = PTRUE_B 31, implicit $vg - ; EXPAND-NEXT: $p0 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: MSR 55824, $fp, implicit-def $nzcv - ; - ; EXPAND-NEXT: FAKE_USE implicit $nzcv, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18 - ; - ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0 - ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.13) - ; EXPAND-NEXT: $p1 = frame-destroy PTRUE_B 31, implicit $vg - ; EXPAND-NEXT: $p15 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.12) - ; EXPAND-NEXT: $p14 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.11) - ; EXPAND-NEXT: $p13 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.10) - ; EXPAND-NEXT: $p12 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 4 :: (load (s128) from %stack.9) - ; EXPAND-NEXT: $p11 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 5 :: (load (s128) from %stack.8) - ; EXPAND-NEXT: $p10 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 6 :: (load (s128) from %stack.7) - ; EXPAND-NEXT: $p9 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 7 :: (load (s128) from %stack.6) - ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 8 :: (load (s128) from %stack.5) - ; EXPAND-NEXT: $p7 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 9 :: (load (s128) from %stack.4) - ; EXPAND-NEXT: $p6 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 10 :: (load (s128) from %stack.3) - ; EXPAND-NEXT: $p5 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 11 :: (load (s128) from %stack.2) - ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 12, implicit $vg - ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.14) - ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0 - ; EXPAND-NEXT: RET undef $lr, implicit $p0, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit $x4, implicit $x5, implicit $x6, implicit $x7, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, 
implicit $x18 - $nzcv = IMPLICIT_DEF - $x8 = IMPLICIT_DEF - $x9 = IMPLICIT_DEF - $x10 = IMPLICIT_DEF - $x11 = IMPLICIT_DEF - $x12 = IMPLICIT_DEF - $x13 = IMPLICIT_DEF - $x14 = IMPLICIT_DEF - $x15 = IMPLICIT_DEF - $x16 = IMPLICIT_DEF - $x17 = IMPLICIT_DEF - $x18 = IMPLICIT_DEF - - %1:ppr = COPY $p0 - - $p0 = IMPLICIT_DEF - $p1 = IMPLICIT_DEF - $p2 = IMPLICIT_DEF - $p3 = IMPLICIT_DEF - $p4 = IMPLICIT_DEF - $p5 = IMPLICIT_DEF - $p6 = IMPLICIT_DEF - $p7 = IMPLICIT_DEF - $p8 = IMPLICIT_DEF - $p9 = IMPLICIT_DEF - $p10 = IMPLICIT_DEF - $p11 = IMPLICIT_DEF - $p12 = IMPLICIT_DEF - $p13 = IMPLICIT_DEF - $p14 = IMPLICIT_DEF - $p15 = IMPLICIT_DEF - - $p0 = COPY %1 - - FAKE_USE implicit $nzcv, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18 - - RET_ReallyLR implicit $p0, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit $x4, implicit $x5, implicit $x6, implicit $x7, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18 -... ---- -name: zpr_predicate_spill__spill_zpr -tracksRegLiveness: true -stack: -liveins: - - { reg: '$p0' } - - { reg: '$z0' } - - { reg: '$z1' } - - { reg: '$z2' } - - { reg: '$z3' } - - { reg: '$z4' } - - { reg: '$z5' } - - { reg: '$z6' } - - { reg: '$z7' } -body: | - bb.0.entry: - liveins: $p0, $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7 - - ; CHECK-LABEL: name: zpr_predicate_spill__spill_zpr - ; CHECK: stack: - ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 16, alignment: 16, - ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: - ; CHECK: liveins: $p0, $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7 - ; CHECK-NEXT: {{ $}} - ; - ; CHECK-NEXT: $z16 = IMPLICIT_DEF - ; CHECK-NEXT: $z17 = IMPLICIT_DEF - ; CHECK-NEXT: $z18 = IMPLICIT_DEF - ; CHECK-NEXT: $z19 = IMPLICIT_DEF - ; CHECK-NEXT: $z20 = IMPLICIT_DEF - ; CHECK-NEXT: $z21 = IMPLICIT_DEF - ; CHECK-NEXT: $z22 = IMPLICIT_DEF - ; CHECK-NEXT: $z23 = IMPLICIT_DEF - ; CHECK-NEXT: $z24 = IMPLICIT_DEF - ; CHECK-NEXT: $z25 = IMPLICIT_DEF - ; CHECK-NEXT: $z26 = IMPLICIT_DEF - ; CHECK-NEXT: $z27 = IMPLICIT_DEF - ; CHECK-NEXT: $z28 = IMPLICIT_DEF - ; CHECK-NEXT: $z29 = IMPLICIT_DEF - ; CHECK-NEXT: $z30 = IMPLICIT_DEF - ; CHECK-NEXT: $z31 = IMPLICIT_DEF - ; - ; CHECK-NEXT: SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p0, %stack.0, 0 :: (store (s128) into %stack.0) - ; - ; CHECK-NEXT: $p0 = IMPLICIT_DEF - ; CHECK-NEXT: $p1 = IMPLICIT_DEF - ; CHECK-NEXT: $p2 = IMPLICIT_DEF - ; CHECK-NEXT: $p3 = IMPLICIT_DEF - ; CHECK-NEXT: $p4 = IMPLICIT_DEF - ; CHECK-NEXT: $p5 = IMPLICIT_DEF - ; CHECK-NEXT: $p6 = IMPLICIT_DEF - ; CHECK-NEXT: $p7 = IMPLICIT_DEF - ; CHECK-NEXT: $p8 = IMPLICIT_DEF - ; CHECK-NEXT: $p9 = IMPLICIT_DEF - ; CHECK-NEXT: $p10 = IMPLICIT_DEF - ; CHECK-NEXT: $p11 = IMPLICIT_DEF - ; CHECK-NEXT: $p12 = IMPLICIT_DEF - ; CHECK-NEXT: $p13 = IMPLICIT_DEF - ; CHECK-NEXT: $p14 = IMPLICIT_DEF - ; CHECK-NEXT: $p15 = IMPLICIT_DEF - ; - ; CHECK-NEXT: $p0 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0 :: (load (s128) from %stack.0) - ; - ; CHECK-NEXT: FAKE_USE implicit $z16, implicit $z17, implicit $z18, implicit $z19, implicit $z20, implicit $z21, implicit $z22, implicit $z23, implicit $z24, implicit $z25, implicit $z26, implicit $z27, implicit $z28, implicit $z29, implicit $z30, implicit $z31 - ; - ; CHECK-NEXT: RET_ReallyLR implicit $p0, implicit $z0, implicit $z1, implicit $z2, implicit $z3, implicit $z4, implicit 
$z5, implicit $z6, implicit $z7 - - ; EXPAND-LABEL: name: zpr_predicate_spill__spill_zpr - ; EXPAND: liveins: $p0, $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7, $fp, $p15, $p14, $p13, $p12, $p11, $p10, $p9, $p8, $p7, $p6, $p5, $p4, $z23, $z22, $z21, $z20, $z19, $z18, $z17, $z16 - ; EXPAND-NEXT: {{ $}} - ; - ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0 - ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.22) - ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -20, implicit $vg - ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p15, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 0 :: (store (s128) into %stack.21) - ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p14, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 1 :: (store (s128) into %stack.20) - ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p13, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 2 :: (store (s128) into %stack.19) - ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p12, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 3 :: (store (s128) into %stack.18) - ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p11, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 4 :: (store (s128) into %stack.17) - ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p10, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 5 :: (store (s128) into %stack.16) - ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p9, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 6 :: (store (s128) into %stack.15) - ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p8, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 7 :: (store (s128) into %stack.14) - ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p7, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 8 :: (store (s128) into %stack.13) - ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p6, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 9 :: (store (s128) into %stack.12) - ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p5, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 10 :: (store (s128) into %stack.11) - ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p4, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 11 :: (store (s128) into %stack.10) - ; EXPAND-NEXT: frame-setup STR_ZXI killed $z23, $sp, 12 :: (store (s128) into %stack.9) - ; EXPAND-NEXT: frame-setup STR_ZXI killed $z22, $sp, 13 :: (store (s128) into %stack.8) - ; EXPAND-NEXT: frame-setup STR_ZXI killed $z21, $sp, 14 :: (store (s128) into %stack.7) - ; EXPAND-NEXT: frame-setup STR_ZXI killed $z20, $sp, 15 :: (store (s128) into %stack.6) - ; EXPAND-NEXT: frame-setup STR_ZXI killed $z19, $sp, 16 :: (store (s128) into %stack.5) - ; EXPAND-NEXT: frame-setup STR_ZXI killed $z18, $sp, 17 :: (store (s128) into %stack.4) - ; EXPAND-NEXT: frame-setup STR_ZXI killed $z17, $sp, 18 :: (store (s128) into %stack.3) - ; EXPAND-NEXT: frame-setup STR_ZXI killed $z16, $sp, 19 :: (store (s128) into %stack.2) - ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0 - ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg - ; - ; EXPAND-NEXT: $z16 = IMPLICIT_DEF - ; EXPAND-NEXT: $z17 = IMPLICIT_DEF - ; EXPAND-NEXT: $z18 = IMPLICIT_DEF - ; EXPAND-NEXT: $z19 = IMPLICIT_DEF - ; EXPAND-NEXT: $z20 = IMPLICIT_DEF - ; EXPAND-NEXT: $z21 = IMPLICIT_DEF - ; EXPAND-NEXT: $z22 = IMPLICIT_DEF - ; EXPAND-NEXT: $z23 = IMPLICIT_DEF - ; EXPAND-NEXT: $z24 = IMPLICIT_DEF - ; EXPAND-NEXT: $z25 = IMPLICIT_DEF - ; EXPAND-NEXT: $z26 = IMPLICIT_DEF - ; EXPAND-NEXT: 
$z27 = IMPLICIT_DEF - ; EXPAND-NEXT: $z28 = IMPLICIT_DEF - ; EXPAND-NEXT: $z29 = IMPLICIT_DEF - ; EXPAND-NEXT: $z30 = IMPLICIT_DEF - ; EXPAND-NEXT: $z31 = IMPLICIT_DEF - ; - ; EXPAND-NEXT: $x8 = ADDXri $sp, 1024, 0 - ; EXPAND-NEXT: STR_ZXI $z0, $x8, 0 :: (store (s128) into %stack.24) - ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p0, 1, 0 - ; EXPAND-NEXT: STR_ZXI $z0, $x8, 1 :: (store (s128) into %stack.0) - ; EXPAND-NEXT: $z0 = LDR_ZXI $x8, 0 :: (load (s128) from %stack.24) - ; - ; EXPAND-NEXT: $p0 = IMPLICIT_DEF - ; EXPAND-NEXT: $p1 = IMPLICIT_DEF - ; EXPAND-NEXT: $p2 = IMPLICIT_DEF - ; EXPAND-NEXT: $p3 = IMPLICIT_DEF - ; EXPAND-NEXT: $p4 = IMPLICIT_DEF - ; EXPAND-NEXT: $p5 = IMPLICIT_DEF - ; EXPAND-NEXT: $p6 = IMPLICIT_DEF - ; EXPAND-NEXT: $p7 = IMPLICIT_DEF - ; EXPAND-NEXT: $p8 = IMPLICIT_DEF - ; EXPAND-NEXT: $p9 = IMPLICIT_DEF - ; EXPAND-NEXT: $p10 = IMPLICIT_DEF - ; EXPAND-NEXT: $p11 = IMPLICIT_DEF - ; EXPAND-NEXT: $p12 = IMPLICIT_DEF - ; EXPAND-NEXT: $p13 = IMPLICIT_DEF - ; EXPAND-NEXT: $p14 = IMPLICIT_DEF - ; EXPAND-NEXT: $p15 = IMPLICIT_DEF - ; - ; EXPAND-NEXT: STR_ZXI $z0, $x8, 0 :: (store (s128) into %stack.24) - ; EXPAND-NEXT: $z0 = LDR_ZXI $x8, 1 :: (load (s128) from %stack.0) - ; EXPAND-NEXT: $p0 = PTRUE_B 31, implicit $vg - ; EXPAND-NEXT: $p0 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = LDR_ZXI killed $x8, 0 :: (load (s128) from %stack.24) - ; - ; EXPAND-NEXT: FAKE_USE implicit $z16, implicit $z17, implicit $z18, implicit $z19, implicit $z20, implicit $z21, implicit $z22, implicit $z23, implicit $z24, implicit $z25, implicit $z26, implicit $z27, implicit $z28, implicit $z29, implicit $z30, implicit $z31 - ; - ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0 - ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg - ; EXPAND-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 12 :: (load (s128) from %stack.9) - ; EXPAND-NEXT: $z22 = frame-destroy LDR_ZXI $sp, 13 :: (load (s128) from %stack.8) - ; EXPAND-NEXT: $z21 = frame-destroy LDR_ZXI $sp, 14 :: (load (s128) from %stack.7) - ; EXPAND-NEXT: $z20 = frame-destroy LDR_ZXI $sp, 15 :: (load (s128) from %stack.6) - ; EXPAND-NEXT: $z19 = frame-destroy LDR_ZXI $sp, 16 :: (load (s128) from %stack.5) - ; EXPAND-NEXT: $z18 = frame-destroy LDR_ZXI $sp, 17 :: (load (s128) from %stack.4) - ; EXPAND-NEXT: $z17 = frame-destroy LDR_ZXI $sp, 18 :: (load (s128) from %stack.3) - ; EXPAND-NEXT: $z16 = frame-destroy LDR_ZXI $sp, 19 :: (load (s128) from %stack.2) - ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.21) - ; EXPAND-NEXT: $p1 = frame-destroy PTRUE_B 31, implicit $vg - ; EXPAND-NEXT: $p15 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.20) - ; EXPAND-NEXT: $p14 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.19) - ; EXPAND-NEXT: $p13 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.18) - ; EXPAND-NEXT: $p12 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 4 :: (load (s128) from %stack.17) - ; EXPAND-NEXT: $p11 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 5 
:: (load (s128) from %stack.16) - ; EXPAND-NEXT: $p10 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 6 :: (load (s128) from %stack.15) - ; EXPAND-NEXT: $p9 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 7 :: (load (s128) from %stack.14) - ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 8 :: (load (s128) from %stack.13) - ; EXPAND-NEXT: $p7 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 9 :: (load (s128) from %stack.12) - ; EXPAND-NEXT: $p6 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 10 :: (load (s128) from %stack.11) - ; EXPAND-NEXT: $p5 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 11 :: (load (s128) from %stack.10) - ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 20, implicit $vg - ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.22) - ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0 - ; EXPAND-NEXT: RET undef $lr, implicit $p0, implicit $z0, implicit $z1, implicit $z2, implicit $z3, implicit $z4, implicit $z5, implicit $z6, implicit $z7 - $z16 = IMPLICIT_DEF - $z17 = IMPLICIT_DEF - $z18 = IMPLICIT_DEF - $z19 = IMPLICIT_DEF - $z20 = IMPLICIT_DEF - $z21 = IMPLICIT_DEF - $z22 = IMPLICIT_DEF - $z23 = IMPLICIT_DEF - $z24 = IMPLICIT_DEF - $z25 = IMPLICIT_DEF - $z26 = IMPLICIT_DEF - $z27 = IMPLICIT_DEF - $z28 = IMPLICIT_DEF - $z29 = IMPLICIT_DEF - $z30 = IMPLICIT_DEF - $z31 = IMPLICIT_DEF - - %1:ppr = COPY $p0 - - $p0 = IMPLICIT_DEF - $p1 = IMPLICIT_DEF - $p2 = IMPLICIT_DEF - $p3 = IMPLICIT_DEF - $p4 = IMPLICIT_DEF - $p5 = IMPLICIT_DEF - $p6 = IMPLICIT_DEF - $p7 = IMPLICIT_DEF - $p8 = IMPLICIT_DEF - $p9 = IMPLICIT_DEF - $p10 = IMPLICIT_DEF - $p11 = IMPLICIT_DEF - $p12 = IMPLICIT_DEF - $p13 = IMPLICIT_DEF - $p14 = IMPLICIT_DEF - $p15 = IMPLICIT_DEF - - $p0 = COPY %1 - - FAKE_USE implicit $z16, implicit $z17, implicit $z18, implicit $z19, implicit $z20, implicit $z21, implicit $z22, implicit $z23, implicit $z24, implicit $z25, implicit $z26, implicit $z27, implicit $z28, implicit $z29, implicit $z30, implicit $z31 - - RET_ReallyLR implicit $p0, implicit $z0, implicit $z1, implicit $z2, implicit $z3, implicit $z4, implicit $z5, implicit $z6, implicit $z7 -... 
---- -name: zpr_predicate_spill_above_p7 -tracksRegLiveness: true -stack: -liveins: - - { reg: '$p0' } - - { reg: '$p1' } - - { reg: '$p2' } - - { reg: '$p3' } -body: | - bb.0.entry: - liveins: $p0, $p1, $p2, $p3 - - ; CHECK-LABEL: name: zpr_predicate_spill_above_p7 - ; CHECK: stack: - ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 16, alignment: 16, - ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: - ; CHECK: liveins: $p0, $p1, $p2, $p3 - ; CHECK-NEXT: {{ $}} - ; - ; CHECK-NEXT: $p15 = IMPLICIT_DEF - ; - ; CHECK-NEXT: SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p15, %stack.0, 0 :: (store (s128) into %stack.0) - ; - ; CHECK-NEXT: $p0 = IMPLICIT_DEF - ; CHECK-NEXT: $p1 = IMPLICIT_DEF - ; CHECK-NEXT: $p2 = IMPLICIT_DEF - ; CHECK-NEXT: $p3 = IMPLICIT_DEF - ; CHECK-NEXT: $p4 = IMPLICIT_DEF - ; CHECK-NEXT: $p5 = IMPLICIT_DEF - ; CHECK-NEXT: $p6 = IMPLICIT_DEF - ; CHECK-NEXT: $p7 = IMPLICIT_DEF - ; CHECK-NEXT: $p8 = IMPLICIT_DEF - ; CHECK-NEXT: $p9 = IMPLICIT_DEF - ; CHECK-NEXT: $p10 = IMPLICIT_DEF - ; CHECK-NEXT: $p11 = IMPLICIT_DEF - ; CHECK-NEXT: $p12 = IMPLICIT_DEF - ; CHECK-NEXT: $p13 = IMPLICIT_DEF - ; CHECK-NEXT: $p14 = IMPLICIT_DEF - ; CHECK-NEXT: $p15 = IMPLICIT_DEF - ; - ; CHECK-NEXT: $p15 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0 :: (load (s128) from %stack.0) - ; - ; CHECK-NEXT: FAKE_USE implicit $p4, implicit $p5, implicit $p6, implicit $p7 - ; - ; CHECK-NEXT: RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3 - - ; EXPAND-LABEL: name: zpr_predicate_spill_above_p7 - ; EXPAND: liveins: $p0, $p1, $p2, $p3, $fp, $p15, $p14, $p13, $p12, $p11, $p10, $p9, $p8, $p7, $p6, $p5, $p4 - ; EXPAND-NEXT: {{ $}} - ; - ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0 - ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.14) - ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -12, implicit $vg - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p15, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.13) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p14, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.12) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p13, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 2 :: (store (s128) into %stack.11) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p12, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 3 :: (store (s128) into %stack.10) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p11, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 4 :: (store (s128) into %stack.9) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p10, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 5 :: (store (s128) into %stack.8) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p9, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 6 :: (store (s128) into %stack.7) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 7 :: (store (s128) into %stack.6) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p7, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 8 :: (store (s128) into %stack.5) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p6, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 9 :: (store (s128) into %stack.4) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p5, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 10 :: (store (s128) into %stack.3) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0 - ; 
EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 11 :: (store (s128) into %stack.2) - ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0 - ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg - ; - ; EXPAND-NEXT: $p15 = IMPLICIT_DEF - ; - ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p15, 1, 0 - ; EXPAND-NEXT: $x8 = ADDXri $sp, 1024, 0 - ; EXPAND-NEXT: STR_ZXI $z0, $x8, 1 :: (store (s128) into %stack.0) - ; - ; EXPAND-NEXT: $p0 = IMPLICIT_DEF - ; EXPAND-NEXT: $p1 = IMPLICIT_DEF - ; EXPAND-NEXT: $p2 = IMPLICIT_DEF - ; EXPAND-NEXT: $p3 = IMPLICIT_DEF - ; EXPAND-NEXT: $p4 = IMPLICIT_DEF - ; EXPAND-NEXT: $p5 = IMPLICIT_DEF - ; EXPAND-NEXT: $p6 = IMPLICIT_DEF - ; EXPAND-NEXT: $p7 = IMPLICIT_DEF - ; EXPAND-NEXT: $p8 = IMPLICIT_DEF - ; EXPAND-NEXT: $p9 = IMPLICIT_DEF - ; EXPAND-NEXT: $p10 = IMPLICIT_DEF - ; EXPAND-NEXT: $p11 = IMPLICIT_DEF - ; EXPAND-NEXT: $p12 = IMPLICIT_DEF - ; EXPAND-NEXT: $p13 = IMPLICIT_DEF - ; EXPAND-NEXT: $p14 = IMPLICIT_DEF - ; EXPAND-NEXT: $p15 = IMPLICIT_DEF - ; - ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p0, 1, 0 - ; EXPAND-NEXT: STR_ZXI $z0, $x8, 0 :: (store (s128) into %stack.16) - ; EXPAND-NEXT: $z0 = LDR_ZXI $x8, 1 :: (load (s128) from %stack.0) - ; EXPAND-NEXT: $p0 = PTRUE_B 31, implicit $vg - ; EXPAND-NEXT: $p15 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = LDR_ZXI killed $x8, 0 :: (load (s128) from %stack.16) - ; EXPAND-NEXT: $p0 = PTRUE_B 31, implicit $vg - ; EXPAND-NEXT: $p0 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; - ; EXPAND-NEXT: FAKE_USE implicit $p4, implicit $p5, implicit $p6, implicit $p7 - ; - ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0 - ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.13) - ; EXPAND-NEXT: $p4 = frame-destroy PTRUE_B 31, implicit $vg - ; EXPAND-NEXT: $p15 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.12) - ; EXPAND-NEXT: $p14 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.11) - ; EXPAND-NEXT: $p13 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.10) - ; EXPAND-NEXT: $p12 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 4 :: (load (s128) from %stack.9) - ; EXPAND-NEXT: $p11 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 5 :: (load (s128) from %stack.8) - ; EXPAND-NEXT: $p10 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 6 :: (load (s128) from %stack.7) - ; EXPAND-NEXT: $p9 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 7 :: (load (s128) from %stack.6) - ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 8 :: (load (s128) from %stack.5) - ; EXPAND-NEXT: $p7 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 9 :: (load (s128) 
from %stack.4) - ; EXPAND-NEXT: $p6 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 10 :: (load (s128) from %stack.3) - ; EXPAND-NEXT: $p5 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 11 :: (load (s128) from %stack.2) - ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 12, implicit $vg - ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.14) - ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0 - ; EXPAND-NEXT: RET undef $lr, implicit $p0, implicit $p1, implicit $p2, implicit $p3 - $p15 = IMPLICIT_DEF - %1:ppr = COPY $p15 - - $p0 = IMPLICIT_DEF - $p1 = IMPLICIT_DEF - $p2 = IMPLICIT_DEF - $p3 = IMPLICIT_DEF - $p4 = IMPLICIT_DEF - $p5 = IMPLICIT_DEF - $p6 = IMPLICIT_DEF - $p7 = IMPLICIT_DEF - $p8 = IMPLICIT_DEF - $p9 = IMPLICIT_DEF - $p10 = IMPLICIT_DEF - $p11 = IMPLICIT_DEF - $p12 = IMPLICIT_DEF - $p13 = IMPLICIT_DEF - $p14 = IMPLICIT_DEF - $p15 = IMPLICIT_DEF - - $p15 = COPY %1 - - FAKE_USE implicit $p4, implicit $p5, implicit $p6, implicit $p7 - - RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3 -... ---- -name: zpr_predicate_spill_p4_saved -tracksRegLiveness: true -stack: -liveins: - - { reg: '$p0' } - - { reg: '$p1' } - - { reg: '$p2' } - - { reg: '$p3' } -body: | - bb.0.entry: - liveins: $p0, $p1, $p2, $p3 - - ; CHECK-LABEL: name: zpr_predicate_spill_p4_saved - ; CHECK: liveins: $p0, $p1, $p2, $p3 - ; CHECK-NEXT: {{ $}} - ; - ; CHECK-NEXT: $p8 = IMPLICIT_DEF - ; - ; CHECK-NEXT: RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3 - - ; EXPAND-LABEL: name: zpr_predicate_spill_p4_saved - ; EXPAND: liveins: $p0, $p1, $p2, $p3, $fp, $p8, $p4 - ; EXPAND-NEXT: {{ $}} - ; EXPAND-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) - ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.1) - ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.0) - ; - ; EXPAND-NEXT: $p8 = IMPLICIT_DEF - ; - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.1) - ; EXPAND-NEXT: $p4 = frame-destroy PTRUE_B 31, implicit $vg - ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.0) - ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg - ; EXPAND-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) - ; EXPAND-NEXT: RET undef $lr, implicit $p0, implicit $p1, implicit $p2, implicit $p3 - - ; If we spill a register above p8, p4 must also be saved, so we can guarantee - ; they will be a register (in the range p0-p7 to for the cmpne reload). - $p8 = IMPLICIT_DEF - - RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3 -... 
diff --git a/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll b/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll index 01e3d3a..c0a2943 100644 --- a/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll +++ b/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll @@ -1,7 +1,5 @@ ; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -pass-remarks-analysis=sme -aarch64-stack-hazard-remark-size=64 -o /dev/null < %s 2>&1 | FileCheck %s --check-prefixes=CHECK ; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -pass-remarks-analysis=sme -aarch64-stack-hazard-size=1024 -o /dev/null < %s 2>&1 | FileCheck %s --check-prefixes=CHECK-PADDING -; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -pass-remarks-analysis=sme -aarch64-enable-zpr-predicate-spills -aarch64-stack-hazard-remark-size=64 -o /dev/null < %s 2>&1 | FileCheck %s --check-prefixes=CHECK-ZPR-PRED-SPILLS -; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -pass-remarks-analysis=sme -aarch64-enable-zpr-predicate-spills -aarch64-stack-hazard-size=1024 -o /dev/null < %s 2>&1 | FileCheck %s --check-prefixes=CHECK-ZPR-PRED-SPILLS-WITH-PADDING ; Don't emit remarks for non-streaming functions. define float @csr_x20_stackargs_notsc(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h, float %i) { @@ -69,16 +67,11 @@ entry: ; SVE calling conventions ; Padding is placed between predicate and fpr/zpr register spills, so only emit remarks when hazard padding is off. -; Note: The -aarch64-enable-zpr-predicate-spills option is deprecated (and will be removed soon). define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) #2 { ; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at [SP-64-258 * vscale] is too close to FPR stack object at [SP-64-256 * vscale] ; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64] ; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'svecc_call': -; CHECK-ZPR-PRED-SPILLS-NOT: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at {{.*}} is too close to FPR stack object -; CHECK-ZPR-PRED-SPILLS: <unknown>:0:0: stack hazard in 'svecc_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64] -; CHECK-ZPR-PRED-SPILLS-WITH-PADDING-NOT: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at {{.*}} is too close to FPR stack object -; CHECK-ZPR-PRED-SPILLS-WITH-PADDING-NOT: <unknown>:0:0: stack hazard in 'svecc_call': FPR stack object at {{.*}} is too close to GPR stack object entry: tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2 %call = call ptr @memset(ptr noundef nonnull %P1, i32 noundef 45, i32 noundef 37) @@ -89,10 +82,6 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': PPR stack object at [SP-64-258 * vscale] is too close to FPR stack object at [SP-64-256 * vscale] ; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64] ; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': -; CHECK-ZPR-PRED-SPILLS-NOT: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at {{.*}} is too close to FPR stack object -; CHECK-ZPR-PRED-SPILLS: <unknown>:0:0: stack hazard in 'svecc_alloca_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at 
[SP-64] -; CHECK-ZPR-PRED-SPILLS-WITH-PADDING-NOT: <unknown>:0:0: stack hazard in 'svecc_alloca_call': PPR stack object at {{.*}} is too close to FPR stack object -; CHECK-ZPR-PRED-SPILLS-WITH-PADDING-NOT: <unknown>:0:0: stack hazard in 'svecc_alloca_call': FPR stack object at {{.*}} is too close to GPR stack object entry: tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2 %0 = alloca [37 x i8], align 16 diff --git a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll index 6b7d704..ede470b 100644 --- a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll +++ b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll @@ -1,13 +1,11 @@ ; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 < %s | FileCheck --check-prefixes=CHECK,GFX11 %s ; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 < %s | FileCheck --check-prefixes=CHECK,GFX12 %s -; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=+dynamic-vgpr < %s | FileCheck --check-prefixes=CHECK,GFX12,DVGPR %s ; CHECK: .amdgpu_pal_metadata ; CHECK-NEXT: --- ; CHECK-NEXT: amdpal.pipelines: ; CHECK-NEXT: - .api: Vulkan ; CHECK-NEXT: .compute_registers: -; DVGPR-NEXT: .dynamic_vgpr_en: true ; CHECK-NEXT: .tg_size_en: true ; CHECK-NEXT: .tgid_x_en: false ; CHECK-NEXT: .tgid_y_en: false diff --git a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll index 5c0c366..5325499 100644 --- a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll +++ b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll @@ -1,17 +1,14 @@ -; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 <%s | FileCheck %s --check-prefixes=CHECK,GFX11,NODVGPR -; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 <%s | FileCheck %s --check-prefixes=CHECK,NODVGPR -; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=+dynamic-vgpr <%s | FileCheck %s --check-prefixes=CHECK,DVGPR +; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 <%s | FileCheck %s --check-prefixes=CHECK,GFX11 +; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 <%s | FileCheck %s --check-prefixes=CHECK ; CHECK-LABEL: {{^}}_amdgpu_cs_main: -; NODVGPR: ; TotalNumSgprs: 4 -; DVGPR: ; TotalNumSgprs: 34 +; CHECK: ; TotalNumSgprs: 4 ; CHECK: ; NumVgprs: 2 ; CHECK: .amdgpu_pal_metadata ; CHECK-NEXT: --- ; CHECK-NEXT: amdpal.pipelines: ; CHECK-NEXT: - .api: Vulkan ; CHECK-NEXT: .compute_registers: -; DVGPR-NEXT: .dynamic_vgpr_en: true ; CHECK-NEXT: .tg_size_en: true ; CHECK-NEXT: .tgid_x_en: false ; CHECK-NEXT: .tgid_y_en: false @@ -57,7 +54,6 @@ ; CHECK-NEXT: .cs: ; CHECK-NEXT: .checksum_value: 0x9444d7d0 ; CHECK-NEXT: .debug_mode: false -; DVGPR-NEXT: .dynamic_vgpr_saved_count: 0x70 ; CHECK-NEXT: .entry_point: _amdgpu_cs_main ; CHECK-NEXT: .entry_point_symbol: _amdgpu_cs_main ; CHECK-NEXT: .excp_en: 0 @@ -69,8 +65,7 @@ ; CHECK-NEXT: .mem_ordered: true ; CHECK-NEXT: .scratch_en: false ; CHECK-NEXT: .scratch_memory_size: 0 -; NODVGPR-NEXT: .sgpr_count: 0x4 -; DVGPR-NEXT: .sgpr_count: 0x22 +; CHECK-NEXT: .sgpr_count: 0x4 ; CHECK-NEXT: .sgpr_limit: 0x6a ; CHECK-NEXT: .threadgroup_dimensions: ; CHECK-NEXT: - 0x1 diff --git a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6-dvgpr.ll b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6-dvgpr.ll new file mode 100644 index 0000000..e598b0c --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6-dvgpr.ll @@ -0,0 +1,204 @@ +; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 <%s | FileCheck %s --check-prefixes=CHECK + +; CHECK-LABEL: {{^}}_amdgpu_cs_main: +; CHECK: ; TotalNumSgprs: 34 +; CHECK: ; NumVgprs: 2 +; CHECK: 
.amdgpu_pal_metadata +; CHECK-NEXT: --- +; CHECK-NEXT: amdpal.pipelines: +; CHECK-NEXT: - .api: Vulkan +; CHECK-NEXT: .compute_registers: +; CHECK-NEXT: .dynamic_vgpr_en: true +; CHECK-NEXT: .tg_size_en: true +; CHECK-NEXT: .tgid_x_en: false +; CHECK-NEXT: .tgid_y_en: false +; CHECK-NEXT: .tgid_z_en: false +; CHECK-NEXT: .tidig_comp_cnt: 0x1 +; CHECK-NEXT: .graphics_registers: +; CHECK-NEXT: .ps_extra_lds_size: 0 +; CHECK-NEXT: .spi_ps_input_addr: +; CHECK-NEXT: .ancillary_ena: false +; CHECK-NEXT: .front_face_ena: true +; CHECK-NEXT: .line_stipple_tex_ena: false +; CHECK-NEXT: .linear_center_ena: true +; CHECK-NEXT: .linear_centroid_ena: true +; CHECK-NEXT: .linear_sample_ena: true +; CHECK-NEXT: .persp_center_ena: true +; CHECK-NEXT: .persp_centroid_ena: true +; CHECK-NEXT: .persp_pull_model_ena: false +; CHECK-NEXT: .persp_sample_ena: true +; CHECK-NEXT: .pos_fixed_pt_ena: true +; CHECK-NEXT: .pos_w_float_ena: false +; CHECK-NEXT: .pos_x_float_ena: false +; CHECK-NEXT: .pos_y_float_ena: false +; CHECK-NEXT: .pos_z_float_ena: false +; CHECK-NEXT: .sample_coverage_ena: false +; CHECK-NEXT: .spi_ps_input_ena: +; CHECK-NEXT: .ancillary_ena: false +; CHECK-NEXT: .front_face_ena: false +; CHECK-NEXT: .line_stipple_tex_ena: false +; CHECK-NEXT: .linear_center_ena: false +; CHECK-NEXT: .linear_centroid_ena: false +; CHECK-NEXT: .linear_sample_ena: false +; CHECK-NEXT: .persp_center_ena: false +; CHECK-NEXT: .persp_centroid_ena: false +; CHECK-NEXT: .persp_pull_model_ena: false +; CHECK-NEXT: .persp_sample_ena: true +; CHECK-NEXT: .pos_fixed_pt_ena: false +; CHECK-NEXT: .pos_w_float_ena: false +; CHECK-NEXT: .pos_x_float_ena: false +; CHECK-NEXT: .pos_y_float_ena: false +; CHECK-NEXT: .pos_z_float_ena: false +; CHECK-NEXT: .sample_coverage_ena: false +; CHECK-NEXT: .hardware_stages: +; CHECK-NEXT: .cs: +; CHECK-NEXT: .checksum_value: 0x9444d7d0 +; CHECK-NEXT: .debug_mode: false +; CHECK-NEXT: .dynamic_vgpr_saved_count: 0x70 +; CHECK-NOT: .entry_point: _amdgpu_cs_main +; CHECK-NEXT: .entry_point_symbol: _amdgpu_cs_main +; CHECK-NEXT: .excp_en: 0 +; CHECK-NEXT: .float_mode: 0xc0 +; CHECK-NEXT: .forward_progress: true +; GFX11-NEXT: .ieee_mode: false +; CHECK-NEXT: .image_op: false +; CHECK-NEXT: .lds_size: 0 +; CHECK-NEXT: .mem_ordered: true +; CHECK-NEXT: .scratch_en: false +; CHECK-NEXT: .scratch_memory_size: 0 +; CHECK-NEXT: .sgpr_count: 0x22 +; CHECK-NEXT: .sgpr_limit: 0x6a +; CHECK-NEXT: .threadgroup_dimensions: +; CHECK-NEXT: - 0x1 +; CHECK-NEXT: - 0x400 +; CHECK-NEXT: - 0x1 +; CHECK-NEXT: .trap_present: false +; CHECK-NEXT: .user_data_reg_map: +; CHECK-NEXT: - 0x10000000 +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0 +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: - 0xffffffff +; CHECK-NEXT: .user_sgprs: 0x3 +; CHECK-NEXT: .vgpr_count: 0x2 +; CHECK-NEXT: 
.vgpr_limit: 0x100 +; CHECK-NEXT: .wavefront_size: 0x40 +; CHECK-NEXT: .wgp_mode: false +; CHECK-NEXT: .gs: +; CHECK-NEXT: .debug_mode: false +; CHECK-NOT: .entry_point: _amdgpu_gs_main +; CHECK-NEXT: .entry_point_symbol: gs_shader +; CHECK-NEXT: .forward_progress: true +; GFX11-NEXT: .ieee_mode: false +; CHECK-NEXT: .lds_size: 0x200 +; CHECK-NEXT: .mem_ordered: true +; CHECK-NEXT: .scratch_en: false +; CHECK-NEXT: .scratch_memory_size: 0 +; CHECK-NEXT: .sgpr_count: 0x1 +; CHECK-NEXT: .vgpr_count: 0x1 +; CHECK-NEXT: .wgp_mode: true +; CHECK-NEXT: .hs: +; CHECK-NEXT: .debug_mode: false +; CHECK-NOT: .entry_point: _amdgpu_hs_main +; CHECK-NEXT: .entry_point_symbol: hs_shader +; CHECK-NEXT: .forward_progress: true +; GFX11-NEXT: .ieee_mode: false +; CHECK-NEXT: .lds_size: 0x1000 +; CHECK-NEXT: .mem_ordered: true +; CHECK-NEXT: .scratch_en: false +; CHECK-NEXT: .scratch_memory_size: 0 +; CHECK-NEXT: .sgpr_count: 0x1 +; CHECK-NEXT: .vgpr_count: 0x1 +; CHECK-NEXT: .wgp_mode: true +; CHECK-NEXT: .ps: +; CHECK-NEXT: .debug_mode: false +; CHECK-NOT: .entry_point: _amdgpu_ps_main +; CHECK-NEXT: .entry_point_symbol: ps_shader +; CHECK-NEXT: .forward_progress: true +; GFX11-NEXT: .ieee_mode: false +; CHECK-NEXT: .lds_size: 0 +; CHECK-NEXT: .mem_ordered: true +; CHECK-NEXT: .scratch_en: false +; CHECK-NEXT: .scratch_memory_size: 0 +; CHECK-NEXT: .sgpr_count: 0x1 +; CHECK-NEXT: .vgpr_count: 0x1 +; CHECK-NEXT: .wgp_mode: true +; CHECK: .registers: {} +; CHECK:amdpal.version: +; CHECK-NEXT: - 0x3 +; CHECK-NEXT: - 0x6 +; CHECK-NEXT:... +; CHECK-NEXT: .end_amdgpu_pal_metadata + +define dllexport amdgpu_cs void @_amdgpu_cs_main(i32 inreg %arg1, i32 %arg2) #0 !lgc.shaderstage !1 { +.entry: + %i = call i64 @llvm.amdgcn.s.getpc() + %i1 = and i64 %i, -4294967296 + %i2 = zext i32 %arg1 to i64 + %i3 = or i64 %i1, %i2 + %i4 = inttoptr i64 %i3 to ptr addrspace(4) + %i5 = and i32 %arg2, 1023 + %i6 = lshr i32 %arg2, 10 + %i7 = and i32 %i6, 1023 + %i8 = add nuw nsw i32 %i7, %i5 + %i9 = load <4 x i32>, ptr addrspace(4) %i4, align 16 + %.idx = shl nuw nsw i32 %i8, 2 + call void @llvm.amdgcn.raw.buffer.store.i32(i32 1, <4 x i32> %i9, i32 %.idx, i32 0, i32 0) + ret void +} + +define dllexport amdgpu_ps void @ps_shader() #1 { + ret void +} + +@LDS.GS = external addrspace(3) global [1 x i32], align 4 + +define dllexport amdgpu_gs void @gs_shader() { + %ptr = getelementptr i32, ptr addrspace(3) @LDS.GS, i32 0 + store i32 0, ptr addrspace(3) %ptr, align 4 + ret void +} + +@LDS.HS = external addrspace(3) global [1024 x i32], align 4 + +define dllexport amdgpu_hs void @hs_shader() { + %ptr = getelementptr i32, ptr addrspace(3) @LDS.HS, i32 0 + store i32 0, ptr addrspace(3) %ptr, align 4 + ret void +} + +!amdgpu.pal.metadata.msgpack = !{!0} + +attributes #0 = { nounwind memory(readwrite) "target-features"=",+wavefrontsize64,+cumode" "amdgpu-dynamic-vgpr-block-size"="16" } + +attributes #1 = { nounwind memory(readwrite) "InitialPSInputAddr"="36983" "amdgpu-dynamic-vgpr-block-size"="16" } + +!0 = !{!"\82\B0amdpal.pipelines\91\8A\A4.api\A6Vulkan\B2.compute_registers\85\AB.tg_size_en\C3\AA.tgid_x_en\C2\AA.tgid_y_en\C2\AA.tgid_z_en\C2\AF.tidig_comp_cnt\01\B0.hardware_stages\81\A3.cs\8C\AF.checksum_value\CE\94D\D7\D0\AB.debug_mode\00\AB.float_mode\CC\C0\A9.image_op\C2\AC.mem_ordered\C3\AB.sgpr_limitj\B7.threadgroup_dimensions\93\01\CD\04\00\01\AD.trap_present\00\B2.user_data_reg_map\DC\00 
\CE\10\00\00\00\CE\FF\FF\FF\FF\00\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\AB.user_sgprs\03\AB.vgpr_limit\CD\01\00\AF.wavefront_size@\B7.internal_pipeline_hash\92\CF\E7\10k\A6:\A6%\F7\CF\B2\1F\1A\D4{\DA\E1T\AA.registers\80\A8.shaders\81\A8.compute\82\B0.api_shader_hash\92\CF\E9Zn7}\1E\B9\E7\00\B1.hardware_mapping\91\A3.cs\B0.spill_threshold\CE\FF\FF\FF\FF\A5.type\A2Cs\B0.user_data_limit\01\AF.xgl_cache_info\82\B3.128_bit_cache_hash\92\CF\B4X\B8\11[\A4\88P\CF\A0;\B0\AF\FF\B4\BE\C0\AD.llpc_version\A461.1\AEamdpal.version\92\03\06"} +!1 = !{i32 7} diff --git a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6.ll b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6.ll index 830872a..d2f26e8 100644 --- a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6.ll +++ b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6.ll @@ -1,17 +1,14 @@ -; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 <%s | FileCheck %s --check-prefixes=CHECK,GFX11,NODVGPR -; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 <%s | FileCheck %s --check-prefixes=CHECK,NODVGPR -; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=+dynamic-vgpr <%s | FileCheck %s --check-prefixes=CHECK,DVGPR +; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 <%s | FileCheck %s --check-prefixes=CHECK,GFX11 +; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 <%s | FileCheck %s --check-prefixes=CHECK ; CHECK-LABEL: {{^}}_amdgpu_cs_main: -; NODVGPR: ; TotalNumSgprs: 4 -; DVGPR: ; TotalNumSgprs: 34 +; CHECK: ; TotalNumSgprs: 4 ; CHECK: ; NumVgprs: 2 ; CHECK: .amdgpu_pal_metadata ; CHECK-NEXT: --- ; CHECK-NEXT: amdpal.pipelines: ; CHECK-NEXT: - .api: Vulkan ; CHECK-NEXT: .compute_registers: -; DVGPR-NEXT: .dynamic_vgpr_en: true ; CHECK-NEXT: .tg_size_en: true ; CHECK-NEXT: .tgid_x_en: false ; CHECK-NEXT: .tgid_y_en: false @@ -57,7 +54,6 @@ ; CHECK-NEXT: .cs: ; CHECK-NEXT: .checksum_value: 0x9444d7d0 ; CHECK-NEXT: .debug_mode: false -; DVGPR-NEXT: .dynamic_vgpr_saved_count: 0x70 ; CHECK-NOT: .entry_point: _amdgpu_cs_main ; CHECK-NEXT: .entry_point_symbol: _amdgpu_cs_main ; CHECK-NEXT: .excp_en: 0 @@ -69,8 +65,7 @@ ; CHECK-NEXT: .mem_ordered: true ; CHECK-NEXT: .scratch_en: false ; CHECK-NEXT: .scratch_memory_size: 0 -; NODVGPR-NEXT: .sgpr_count: 0x4 -; DVGPR-NEXT: .sgpr_count: 0x22 +; CHECK-NEXT: .sgpr_count: 0x4 ; CHECK-NEXT: .sgpr_limit: 0x6a ; CHECK-NEXT: .threadgroup_dimensions: ; CHECK-NEXT: - 0x1 diff --git a/llvm/test/CodeGen/NVPTX/convert-sm103a.ll b/llvm/test/CodeGen/NVPTX/convert-sm103a.ll new file mode 100644 index 0000000..54b4dd8 --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/convert-sm103a.ll @@ -0,0 +1,297 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_100a -mattr=+ptx87 | FileCheck %s +; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_103a -mattr=+ptx87 | FileCheck %s +; RUN: %if ptxas-sm_100a && ptxas-isa-8.7 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_100a -mattr=+ptx87 | %ptxas-verify -arch=sm_100a %} +; RUN: %if ptxas-sm_103a && ptxas-isa-8.7 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_103a -mattr=+ptx87 | %ptxas-verify -arch=sm_103a %} + +; F16X2 conversions + +define <2 x half> @cvt_rs_f16x2_f32(float %f1, 
float %f2, i32 %rbits) { +; CHECK-LABEL: cvt_rs_f16x2_f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_f16x2_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_f16x2_f32_param_1]; +; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_f16x2_f32_param_2]; +; CHECK-NEXT: cvt.rs.f16x2.f32 %r4, %r1, %r2, %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; +; CHECK-NEXT: ret; + %val = call <2 x half> @llvm.nvvm.ff2f16x2.rs(float %f1, float %f2, i32 %rbits) + ret <2 x half> %val +} + +define <2 x half> @cvt_rs_relu_f16x2_f32(float %f1, float %f2, i32 %rbits) { +; CHECK-LABEL: cvt_rs_relu_f16x2_f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_relu_f16x2_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_relu_f16x2_f32_param_1]; +; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_relu_f16x2_f32_param_2]; +; CHECK-NEXT: cvt.rs.relu.f16x2.f32 %r4, %r1, %r2, %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; +; CHECK-NEXT: ret; + %val = call <2 x half> @llvm.nvvm.ff2f16x2.rs.relu(float %f1, float %f2, i32 %rbits) + ret <2 x half> %val +} + +define <2 x half> @cvt_rs_sf_f16x2_f32(float %f1, float %f2, i32 %rbits) { +; CHECK-LABEL: cvt_rs_sf_f16x2_f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_sf_f16x2_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_sf_f16x2_f32_param_1]; +; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_sf_f16x2_f32_param_2]; +; CHECK-NEXT: cvt.rs.satfinite.f16x2.f32 %r4, %r1, %r2, %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; +; CHECK-NEXT: ret; + %val = call <2 x half> @llvm.nvvm.ff2f16x2.rs.satfinite(float %f1, float %f2, i32 %rbits) + ret <2 x half> %val +} + +define <2 x half> @cvt_rs_relu_sf_f16x2_f32(float %f1, float %f2, i32 %rbits) { +; CHECK-LABEL: cvt_rs_relu_sf_f16x2_f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_relu_sf_f16x2_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_relu_sf_f16x2_f32_param_1]; +; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_relu_sf_f16x2_f32_param_2]; +; CHECK-NEXT: cvt.rs.relu.satfinite.f16x2.f32 %r4, %r1, %r2, %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; +; CHECK-NEXT: ret; + %val = call <2 x half> @llvm.nvvm.ff2f16x2.rs.relu.satfinite(float %f1, float %f2, i32 %rbits) + ret <2 x half> %val +} + +; BF16X2 conversions + +define <2 x bfloat> @cvt_rs_bf16x2_f32(float %f1, float %f2, i32 %rbits) { +; CHECK-LABEL: cvt_rs_bf16x2_f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_bf16x2_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_bf16x2_f32_param_1]; +; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_bf16x2_f32_param_2]; +; CHECK-NEXT: cvt.rs.bf16x2.f32 %r4, %r1, %r2, %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; +; CHECK-NEXT: ret; + %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs(float %f1, float %f2, i32 %rbits) + ret <2 x bfloat> %val +} + +define <2 x bfloat> @cvt_rs_relu_bf16x2_f32(float %f1, float %f2, i32 %rbits) { +; CHECK-LABEL: cvt_rs_relu_bf16x2_f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_relu_bf16x2_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_relu_bf16x2_f32_param_1]; +; CHECK-NEXT: ld.param.b32 %r3, 
[cvt_rs_relu_bf16x2_f32_param_2]; +; CHECK-NEXT: cvt.rs.relu.bf16x2.f32 %r4, %r1, %r2, %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; +; CHECK-NEXT: ret; + %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.relu(float %f1, float %f2, i32 %rbits) + ret <2 x bfloat> %val +} + +define <2 x bfloat> @cvt_rs_sf_bf16x2_f32(float %f1, float %f2, i32 %rbits) { +; CHECK-LABEL: cvt_rs_sf_bf16x2_f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_sf_bf16x2_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_sf_bf16x2_f32_param_1]; +; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_sf_bf16x2_f32_param_2]; +; CHECK-NEXT: cvt.rs.satfinite.bf16x2.f32 %r4, %r1, %r2, %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; +; CHECK-NEXT: ret; + %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.satfinite(float %f1, float %f2, i32 %rbits) + ret <2 x bfloat> %val +} + +define <2 x bfloat> @cvt_rs_relu_sf_bf16x2_f32(float %f1, float %f2, i32 %rbits) { +; CHECK-LABEL: cvt_rs_relu_sf_bf16x2_f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_relu_sf_bf16x2_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_relu_sf_bf16x2_f32_param_1]; +; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_relu_sf_bf16x2_f32_param_2]; +; CHECK-NEXT: cvt.rs.relu.satfinite.bf16x2.f32 %r4, %r1, %r2, %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; +; CHECK-NEXT: ret; + %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.relu.satfinite(float %f1, float %f2, i32 %rbits) + ret <2 x bfloat> %val +} + +; F8X4 conversions + +define <4 x i8> @cvt_rs_sf_e4m3x4_f32(<4 x float> %fvec, i32 %rbits) { +; CHECK-LABEL: cvt_rs_sf_e4m3x4_f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_sf_e4m3x4_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_sf_e4m3x4_f32_param_1]; +; CHECK-NEXT: cvt.rs.satfinite.e4m3x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r6; +; CHECK-NEXT: ret; + %val = call <4 x i8> @llvm.nvvm.f32x4.to.e4m3x4.rs.satfinite(<4 x float> %fvec, i32 %rbits) + ret <4 x i8> %val +} + +define <4 x i8> @cvt_rs_relu_sf_e4m3x4_f32(<4 x float> %fvec, i32 %rbits) { +; CHECK-LABEL: cvt_rs_relu_sf_e4m3x4_f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_relu_sf_e4m3x4_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_relu_sf_e4m3x4_f32_param_1]; +; CHECK-NEXT: cvt.rs.relu.satfinite.e4m3x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r6; +; CHECK-NEXT: ret; + %val = call <4 x i8> @llvm.nvvm.f32x4.to.e4m3x4.rs.relu.satfinite(<4 x float> %fvec, i32 %rbits) + ret <4 x i8> %val +} + +define <4 x i8> @cvt_rs_sf_e5m2x4_f32(<4 x float> %fvec, i32 %rbits) { +; CHECK-LABEL: cvt_rs_sf_e5m2x4_f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_sf_e5m2x4_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_sf_e5m2x4_f32_param_1]; +; CHECK-NEXT: cvt.rs.satfinite.e5m2x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r6; +; CHECK-NEXT: ret; + %val = call <4 x i8> @llvm.nvvm.f32x4.to.e5m2x4.rs.satfinite(<4 x float> %fvec, i32 %rbits) + ret <4 x i8> %val +} + +define <4 x i8> @cvt_rs_relu_sf_e5m2x4_f32(<4 x float> 
%fvec, i32 %rbits) { +; CHECK-LABEL: cvt_rs_relu_sf_e5m2x4_f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_relu_sf_e5m2x4_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_relu_sf_e5m2x4_f32_param_1]; +; CHECK-NEXT: cvt.rs.relu.satfinite.e5m2x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r6; +; CHECK-NEXT: ret; + %val = call <4 x i8> @llvm.nvvm.f32x4.to.e5m2x4.rs.relu.satfinite(<4 x float> %fvec, i32 %rbits) + ret <4 x i8> %val +} + +; F6X4 conversions + +define <4 x i8> @cvt_rs_sf_e2m3x4_f32(<4 x float> %fvec, i32 %rbits) { +; CHECK-LABEL: cvt_rs_sf_e2m3x4_f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_sf_e2m3x4_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_sf_e2m3x4_f32_param_1]; +; CHECK-NEXT: cvt.rs.satfinite.e2m3x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r6; +; CHECK-NEXT: ret; + %val = call <4 x i8> @llvm.nvvm.f32x4.to.e2m3x4.rs.satfinite(<4 x float> %fvec, i32 %rbits) + ret <4 x i8> %val +} + +define <4 x i8> @cvt_rs_relu_sf_e2m3x4_f32(<4 x float> %fvec, i32 %rbits) { +; CHECK-LABEL: cvt_rs_relu_sf_e2m3x4_f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_relu_sf_e2m3x4_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_relu_sf_e2m3x4_f32_param_1]; +; CHECK-NEXT: cvt.rs.relu.satfinite.e2m3x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r6; +; CHECK-NEXT: ret; + %val = call <4 x i8> @llvm.nvvm.f32x4.to.e2m3x4.rs.relu.satfinite(<4 x float> %fvec, i32 %rbits) + ret <4 x i8> %val +} + +define <4 x i8> @cvt_rs_sf_e3m2x4_f32(<4 x float> %fvec, i32 %rbits) { +; CHECK-LABEL: cvt_rs_sf_e3m2x4_f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_sf_e3m2x4_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_sf_e3m2x4_f32_param_1]; +; CHECK-NEXT: cvt.rs.satfinite.e3m2x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r6; +; CHECK-NEXT: ret; + %val = call <4 x i8> @llvm.nvvm.f32x4.to.e3m2x4.rs.satfinite(<4 x float> %fvec, i32 %rbits) + ret <4 x i8> %val +} + +define <4 x i8> @cvt_rs_relu_sf_e3m2x4_f32(<4 x float> %fvec, i32 %rbits) { +; CHECK-LABEL: cvt_rs_relu_sf_e3m2x4_f32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_relu_sf_e3m2x4_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_relu_sf_e3m2x4_f32_param_1]; +; CHECK-NEXT: cvt.rs.relu.satfinite.e3m2x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r6; +; CHECK-NEXT: ret; + %val = call <4 x i8> @llvm.nvvm.f32x4.to.e3m2x4.rs.relu.satfinite(<4 x float> %fvec, i32 %rbits) + ret <4 x i8> %val +} + +; F4X4 conversions + +define i16 @cvt_rs_sf_e2m1x4_f32(<4 x float> %fvec, i32 %rbits) { +; CHECK-LABEL: cvt_rs_sf_e2m1x4_f32( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<2>; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_sf_e2m1x4_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_sf_e2m1x4_f32_param_1]; +; CHECK-NEXT: cvt.rs.satfinite.e2m1x4.f32 %rs1, {%r1, %r2, 
%r3, %r4}, %r5; +; CHECK-NEXT: cvt.u32.u16 %r6, %rs1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r6; +; CHECK-NEXT: ret; + %val = call i16 @llvm.nvvm.f32x4.to.e2m1x4.rs.satfinite(<4 x float> %fvec, i32 %rbits) + ret i16 %val +} + +define i16 @cvt_rs_relu_sf_e2m1x4_f32(<4 x float> %fvec, i32 %rbits) { +; CHECK-LABEL: cvt_rs_relu_sf_e2m1x4_f32( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<2>; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_relu_sf_e2m1x4_f32_param_0]; +; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_relu_sf_e2m1x4_f32_param_1]; +; CHECK-NEXT: cvt.rs.relu.satfinite.e2m1x4.f32 %rs1, {%r1, %r2, %r3, %r4}, %r5; +; CHECK-NEXT: cvt.u32.u16 %r6, %rs1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r6; +; CHECK-NEXT: ret; + %val = call i16 @llvm.nvvm.f32x4.to.e2m1x4.rs.relu.satfinite(<4 x float> %fvec, i32 %rbits) + ret i16 %val +} diff --git a/llvm/test/CodeGen/NVPTX/wmma-ptx87-sm120a.py b/llvm/test/CodeGen/NVPTX/wmma-ptx87-sm120a.py index ae781df..40055ae 100644 --- a/llvm/test/CodeGen/NVPTX/wmma-ptx87-sm120a.py +++ b/llvm/test/CodeGen/NVPTX/wmma-ptx87-sm120a.py @@ -2,7 +2,7 @@ # RUN: %python %s --ptx=87 --gpu-arch=120 --aa > %t-ptx87-sm_120a.ll # RUN: llc < %t-ptx87-sm_120a.ll -mtriple=nvptx64 -mcpu=sm_120a -mattr=+ptx87 \ # RUN: | FileCheck %t-ptx87-sm_120a.ll -# RUN: %if ptxas-12.7 %{ \ +# RUN: %if ptxas-sm_120a && ptxas-isa-8.7 %{ \ # RUN: llc < %t-ptx87-sm_120a.ll -mtriple=nvptx64 -mcpu=sm_120a -mattr=+ptx87 \ # RUN: | %ptxas-verify -arch=sm_120a \ # RUN: %} diff --git a/llvm/test/CodeGen/NVPTX/wmma.py b/llvm/test/CodeGen/NVPTX/wmma.py index 6d73bce..8427ae4 100644 --- a/llvm/test/CodeGen/NVPTX/wmma.py +++ b/llvm/test/CodeGen/NVPTX/wmma.py @@ -90,6 +90,21 @@ class MMAFrag: "m16n8k32:b:s8": 2, "m16n8k32:c:s32": 4, "m16n8k32:d:s32": 4, + # e4m3/e5m2/e3m2/e2m3/e2m1 -> f16/f32 @ m16n8k16/m16n8k32 + "m16n8k16:a:e4m3": 2, + "m16n8k16:a:e5m2": 2, + "m16n8k32:a:e4m3": 4, + "m16n8k32:a:e5m2": 4, + "m16n8k32:a:e3m2": 4, + "m16n8k32:a:e2m3": 4, + "m16n8k32:a:e2m1": 4, + "m16n8k16:b:e4m3": 1, + "m16n8k16:b:e5m2": 1, + "m16n8k32:b:e4m3": 2, + "m16n8k32:b:e5m2": 2, + "m16n8k32:b:e3m2": 2, + "m16n8k32:b:e2m3": 2, + "m16n8k32:b:e2m1": 2, # mma sp "m16n8k32:a:bf16": 4, "m16n8k32:a:f16": 4, @@ -182,6 +197,18 @@ class MMAFrag: "m8n8k4:b:f64": 1, "m8n8k4:c:f64": 2, "m8n8k4:d:f64": 2, + "m16n8k4:a:f64": 2, + "m16n8k4:b:f64": 1, + "m16n8k4:c:f64": 4, + "m16n8k4:d:f64": 4, + "m16n8k8:a:f64": 4, + "m16n8k8:b:f64": 2, + "m16n8k8:c:f64": 4, + "m16n8k8:d:f64": 4, + "m16n8k16:a:f64": 8, + "m16n8k16:b:f64": 4, + "m16n8k16:c:f64": 4, + "m16n8k16:d:f64": 4, # tf32 -> s32 @ m16n16k8 "m16n16k8:a:tf32": 4, "m16n16k8:b:tf32": 4, @@ -324,7 +351,9 @@ def get_wmma_ops(): def get_mma_ops(): return ( - make_mma_ops(["m8n8k4"], ["f64"], [], ["f64"], []) + make_mma_ops( + ["m8n8k4", "m16n8k4", "m16n8k8", "m16n8k16"], ["f64"], [], ["f64"], [] + ) + make_mma_ops(["m16n8k4", "m16n8k8"], ["tf32"], [], ["f32"], []) + make_mma_ops(["m16n8k16", "m16n8k8"], ["bf16"], [], ["f32"], []) + make_mma_ops( @@ -341,6 +370,20 @@ def get_mma_ops(): ["m8n8k32", "m16n8k32", "m16n8k64"], ["s4", "u4"], ["s4", "u4"], ["s32"], [] ) + make_mma_ops(["m8n8k128", "m16n8k128", "m16n8k256"], ["b1"], [], ["s32"], []) + + make_mma_ops( + ["m16n8k16"], + ["e4m3", "e5m2"], + ["e4m3", "e5m2"], + ["f16", "f32"], + ["f16", "f32"], + ) + + make_mma_ops( + ["m16n8k32"], + ["e4m3", "e5m2", "e3m2", "e2m3", "e2m1"], + ["e4m3", "e5m2", "e3m2", "e2m3", "e2m1"], + ["f16", "f32"], + 
["f16", "f32"], + ) ) @@ -492,7 +535,7 @@ def is_wmma_variant_supported(op, layout_a, layout_b, rnd, satf): return True -def is_mma_variant_supported(op, layout_a, layout_b, satf): +def is_mma_variant_supported(op, layout_a, layout_b, kind, satf): if not ( is_type_supported(op.a.mma_type.ptx_type) and is_mma_geom_supported(op.a.geom) ): @@ -516,13 +559,53 @@ def is_mma_variant_supported(op, layout_a, layout_b, satf): ): return False + if ( + op.a.geom != "m8n8k4" + and op.a.mma_type.ptx_type == "f64" + and (ptx_version < 78 or gpu_arch < 90) + ): + return False + # C and D type must be the same - if op.a.geom == "m16n8k16" and op.c.mma_type.ptx_type != op.d.mma_type.ptx_type: + if ( + op.a.geom in ["m16n8k16", "m16n8k32"] + and op.c.mma_type.ptx_type != op.d.mma_type.ptx_type + ): + return False + + if ( + op.a.geom in ["m16n8k16", "m16n8k32"] + and any( + x in ["e4m3", "e5m2"] + for x in (op.a.mma_type.ptx_type, op.b.mma_type.ptx_type) + ) + and ptx_version < 87 + ): + return False + + if kind != "" and not (ptx_version >= 87 and gpu_arch >= 120 and aa): + return False + + if kind != "" and ( + op.a.geom != "m16n8k32" + or op.a.mma_type.ptx_type not in ["e4m3", "e5m2", "e3m2", "e2m3", "e2m1"] + ): + return False + + if ( + kind == "" + and op.a.geom in ["m16n8k16", "m16n8k32"] + and any( + x in ["e3m2", "e2m3", "e2m1"] + for x in (op.a.mma_type.ptx_type, op.b.mma_type.ptx_type) + ) + ): return False # Require row/col layout for all MMA except m8n8k4 on FP16 if not (op.a.geom == "m8n8k4" and op.a.mma_type.ptx_type == "f16"): return layout_a == "row" and layout_b == "col" + return True @@ -937,7 +1020,12 @@ define ${ret_ty} @test_${function}( """ test_params = params - test_params["intrinsic"] = Template(intrinsic_template).substitute(params) + test_params["intrinsic"] = ( + Template(intrinsic_template) + .substitute(params) + .replace("::", ".") + .replace("_", ".") + ) test_params["function"] = test_params["intrinsic"].replace(".", "_") test_params["instruction"] = Template(instruction_template).substitute(params) test_params["ret_ty"] = make_wmma_ld_ret_ty(op.d) @@ -1002,16 +1090,20 @@ def gen_wmma_mma_tests(): def gen_mma_tests(): - mma_intrinsic_template = "llvm.nvvm.mma${b1op}.${geom}.${alayout}.${blayout}${satf}.${intrinsic_signature}" - mma_instruction_template = "mma.sync${aligned}.${geom}.${alayout}.${blayout}${satf}.${ptx_signature}${b1op}" + mma_intrinsic_template = "llvm.nvvm.mma${b1op}.${geom}.${alayout}.${blayout}${kind}${satf}.${intrinsic_signature}" + mma_instruction_template = "mma.sync${aligned}.${geom}.${alayout}.${blayout}${kind}${satf}.${ptx_signature}${b1op}" generated_items = [] - for op, alayout, blayout, satf in product( - get_mma_ops(), ["row", "col"], ["row", "col"], [".satfinite", ""] + for op, alayout, blayout, kind, satf in product( + get_mma_ops(), + ["row", "col"], + ["row", "col"], + ["", ".kind::f8f6f4"], + [".satfinite", ""], ): - if not is_mma_variant_supported(op, alayout, blayout, satf): + if not is_mma_variant_supported(op, alayout, blayout, kind, satf): continue for b1op in get_b1_ops(op.a.mma_type.ptx_type): @@ -1024,6 +1116,7 @@ def gen_mma_tests(): "satf": satf, "geom": op.a.geom, "b1op": b1op, + "kind": kind, } intrinsic_template = mma_intrinsic_template @@ -1105,9 +1198,9 @@ def is_mma_sp_variant_supported(op, metadata, kind, satf): ): return False - # C and D type must be the same for m16n8k16/m16n8k32 + # C and D type must be the same for m16n8k16/m16n8k32/m16n8k64 if ( - op.a.geom in ["m16n8k16", "m16n8k32"] + op.a.geom in ["m16n8k16", 
"m16n8k32", "m16n8k64"] and op.c.mma_type.ptx_type != op.d.mma_type.ptx_type ): return False diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir index 31de686..92e4588 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir +++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir @@ -148,21 +148,21 @@ body: | ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $edi ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi + ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY1]](s32) ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx + ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[COPY2]](s32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]] - ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s8) - ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32) - ; CHECK-NEXT: G_BRCOND [[TRUNC]](s1), %bb.2 + ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s8) + ; CHECK-NEXT: G_BRCOND [[TRUNC1]](s1), %bb.2 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1.cond.false: ; CHECK-NEXT: successors: %bb.2(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32) - ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2.cond.end: - ; CHECK-NEXT: [[PHI:%[0-9]+]]:_(s8) = G_PHI [[TRUNC2]](s8), %bb.1, [[TRUNC1]](s8), %bb.0 - ; CHECK-NEXT: $al = COPY [[PHI]](s8) + ; CHECK-NEXT: [[PHI:%[0-9]+]]:_(s1) = G_PHI [[TRUNC2]](s1), %bb.1, [[TRUNC]](s1), %bb.0 + ; CHECK-NEXT: [[EXT:%[0-9]+]]:_(s8) = G_ANYEXT [[PHI]](s1) + ; CHECK-NEXT: $al = COPY [[EXT]](s8) ; CHECK-NEXT: RET 0, implicit $al bb.1.entry: successors: %bb.3(0x40000000), %bb.2(0x40000000) diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-undef-vec-scaling.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-undef-vec-scaling.mir new file mode 100644 index 0000000..b02832b --- /dev/null +++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-undef-vec-scaling.mir @@ -0,0 +1,32 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=avx2 -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*' %s -o - | FileCheck %s --check-prefixes=CHECK,AVX2 +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=sse2 -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*' %s -o - | FileCheck %s --check-prefixes=CHECK,SSE2 +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=avx512f -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*' %s -o - | FileCheck %s --check-prefixes=CHECK,AVX512F + + +--- +name: test_basic_g_implicit_def_v8i64 +body: | + bb.0: + ; CHECK-LABEL: name: test_basic_g_implicit_def_v8i64 + ; AVX512F: {{%[0-9]+}}:_(<8 x s64>) = G_IMPLICIT_DEF + ; AVX2: [[DEF_AVX2:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF + ; AVX2-NEXT: {{%[0-9]+}}:_(<8 x s64>) = G_CONCAT_VECTORS [[DEF_AVX2]](<4 x s64>), [[DEF_AVX2]](<4 x s64>) + ; SSE2: [[DEF_SSE2:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF + ; SSE2-NEXT: {{%[0-9]+}}:_(<8 x s64>) = G_CONCAT_VECTORS [[DEF_SSE2]](<2 x s64>), [[DEF_SSE2]](<2 x s64>), [[DEF_SSE2]](<2 x s64>), [[DEF_SSE2]](<2 x s64>) + %0:_(<8 x s64>) = G_IMPLICIT_DEF + RET 0, implicit %0 +... + +--- +name: test_g_implicit_def_cample_size +body: | + bb.1: + ; CHECK-LABEL: name: test_g_implicit_def_cample_size + ; AVX512: {{%[0-9]+}}:_(<8 x s64>) = G_IMPLICIT_DEF + ; AVX2: {{%[0-9]+}}:_(<4 x s64>) = G_IMPLICIT_DEF + ; SSE2: {{%[0-9]+}}:_(<2 x s64>) = G_IMPLICIT_DEF + %0:_(<5 x s63>) = G_IMPLICIT_DEF + RET 0, implicit %0 +... 
+ + diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec256.mir new file mode 100644 index 0000000..254c1b6 --- /dev/null +++ b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec256.mir @@ -0,0 +1,23 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s + +--- +name: select_cfb_vec256 +legalized: true +regBankSelected: true +registers: + - { id: 0, class: vecr, preferred-register: '', flags: [ ] } + - { id: 1, class: vecr, preferred-register: '', flags: [ ] } +body: | + bb.0: + liveins: $ymm0 + + ; CHECK-LABEL: name: select_cfb_vec256 + ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY $ymm0 + ; CHECK-NOT: G_CONSTANT_FOLD_BARRIER + ; CHECK-NEXT: $ymm1 = COPY [[COPY]] + ; CHECK-NEXT: RET 0, implicit $ymm1 + %0:vecr(<8 x s32>) = COPY $ymm0 + %1:vecr(<8 x s32>) = G_CONSTANT_FOLD_BARRIER %0 + $ymm1 = COPY %1(<8 x s32>) + RET 0, implicit $ymm1 +... diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec512.mir new file mode 100644 index 0000000..3da354b --- /dev/null +++ b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec512.mir @@ -0,0 +1,23 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s + +--- +name: select_cfb_vec512 +legalized: true +regBankSelected: true +registers: + - { id: 0, class: vecr, preferred-register: '', flags: [ ] } + - { id: 1, class: vecr, preferred-register: '', flags: [ ] } +body: | + bb.0: + liveins: $zmm0 + + ; CHECK-LABEL: name: select_cfb_vec512 + ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0 + ; CHECK-NOT: G_CONSTANT_FOLD_BARRIER + ; CHECK-NEXT: $zmm1 = COPY [[COPY]] + ; CHECK-NEXT: RET 0, implicit $zmm1 + %0:vecr(<8 x s64>) = COPY $zmm0 + %1:vecr(<8 x s64>) = G_CONSTANT_FOLD_BARRIER %0 + $zmm1 = COPY %1(<8 x s64>) + RET 0, implicit $zmm1 +... diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier.mir b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier.mir new file mode 100644 index 0000000..fa012f9 --- /dev/null +++ b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier.mir @@ -0,0 +1,77 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s + + +--- +name: select_cfb_scalar_s32 +legalized: true +regBankSelected: true +registers: + - { id: 0, class: gpr, preferred-register: '', flags: [ ] } + - { id: 1, class: gpr, preferred-register: '', flags: [ ] } +liveins: +fixedStack: +stack: +constants: +body: | + bb.0: + liveins: $edi + + ; CHECK-LABEL: name: select_cfb_scalar_s32 + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK-NOT: G_CONSTANT_FOLD_BARRIER + ; CHECK-NEXT: $eax = COPY [[COPY]] + ; CHECK-NEXT: RET 0, implicit $eax + %0:gpr(s32) = COPY $edi + %1:gpr(s32) = G_CONSTANT_FOLD_BARRIER %0 + $eax = COPY %1(s32) + RET 0, implicit $eax +... 
+ +--- +name: select_cfb_scalar_s64 +legalized: true +regBankSelected: true +registers: + - { id: 0, class: gpr, preferred-register: '', flags: [ ] } + - { id: 1, class: gpr, preferred-register: '', flags: [ ] } +liveins: +fixedStack: +stack: +constants: +body: | + bb.0: + liveins: $rdi + + ; CHECK-LABEL: name: select_cfb_scalar_s64 + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK-NOT: G_CONSTANT_FOLD_BARRIER + ; CHECK-NEXT: $rax = COPY [[COPY]] + ; CHECK-NEXT: RET 0, implicit $rax + %0:gpr(s64) = COPY $rdi + %1:gpr(s64) = G_CONSTANT_FOLD_BARRIER %0 + $rax = COPY %1(s64) + RET 0, implicit $rax +... + + +--- +name: select_cfb_vec128 +legalized: true +regBankSelected: true +registers: + - { id: 0, class: vecr, preferred-register: '', flags: [ ] } + - { id: 1, class: vecr, preferred-register: '', flags: [ ] } +body: | + bb.0: + liveins: $xmm0 + + ; CHECK-LABEL: name: select_cfb_vec128 + ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0 + ; CHECK-NOT: G_CONSTANT_FOLD_BARRIER + ; CHECK-NEXT: $xmm1 = COPY [[COPY]] + ; CHECK-NEXT: RET 0, implicit $xmm1 + %0:vecr(<4 x s32>) = COPY $xmm0 + %1:vecr(<4 x s32>) = G_CONSTANT_FOLD_BARRIER %0 + $xmm1 = COPY %1(<4 x s32>) + RET 0, implicit $xmm1 +... diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec256.mir new file mode 100644 index 0000000..11251e4 --- /dev/null +++ b/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec256.mir @@ -0,0 +1,23 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s + +--- +name: select_freeze_vec256 +legalized: true +regBankSelected: true +registers: + - { id: 0, class: vecr, preferred-register: '', flags: [ ] } + - { id: 1, class: vecr, preferred-register: '', flags: [ ] } +body: | + bb.0: + liveins: $ymm0 + + ; CHECK-LABEL: name: select_freeze_vec256 + ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY $ymm0 + ; CHECK-NOT: G_FREEZE + ; CHECK-NEXT: $ymm1 = COPY [[COPY]] + ; CHECK-NEXT: RET 0, implicit $ymm1 + %0:vecr(<8 x s32>) = COPY $ymm0 + %1:vecr(<8 x s32>) = G_FREEZE %0 + $ymm1 = COPY %1(<8 x s32>) + RET 0, implicit $ymm1 +... diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec512.mir new file mode 100644 index 0000000..bcf299a --- /dev/null +++ b/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec512.mir @@ -0,0 +1,23 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s + +--- +name: select_freeze_vec512 +legalized: true +regBankSelected: true +registers: + - { id: 0, class: vecr, preferred-register: '', flags: [ ] } + - { id: 1, class: vecr, preferred-register: '', flags: [ ] } +body: | + bb.0: + liveins: $zmm0 + + ; CHECK-LABEL: name: select_freeze_vec512 + ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0 + ; CHECK-NOT: G_FREEZE + ; CHECK-NEXT: $zmm1 = COPY [[COPY]] + ; CHECK-NEXT: RET 0, implicit $zmm1 + %0:vecr(<8 x s64>) = COPY $zmm0 + %1:vecr(<8 x s64>) = G_FREEZE %0 + $zmm1 = COPY %1(<8 x s64>) + RET 0, implicit $zmm1 +... 
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-freeze.mir b/llvm/test/CodeGen/X86/GlobalISel/select-freeze.mir new file mode 100644 index 0000000..cf5ad47 --- /dev/null +++ b/llvm/test/CodeGen/X86/GlobalISel/select-freeze.mir @@ -0,0 +1,77 @@ +# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s + + +--- +name: select_freeze_scalar_s32 +legalized: true +regBankSelected: true +registers: + - { id: 0, class: gpr, preferred-register: '', flags: [ ] } + - { id: 1, class: gpr, preferred-register: '', flags: [ ] } +liveins: +fixedStack: +stack: +constants: +body: | + bb.0: + liveins: $edi + + ; CHECK-LABEL: name: select_freeze_scalar_s32 + ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; CHECK-NOT: G_FREEZE + ; CHECK-NEXT: $eax = COPY [[COPY]] + ; CHECK-NEXT: RET 0, implicit $eax + %0:gpr(s32) = COPY $edi + %1:gpr(s32) = G_FREEZE %0 + $eax = COPY %1(s32) + RET 0, implicit $eax +... + +--- +name: select_freeze_scalar_s64 +legalized: true +regBankSelected: true +registers: + - { id: 0, class: gpr, preferred-register: '', flags: [ ] } + - { id: 1, class: gpr, preferred-register: '', flags: [ ] } +liveins: +fixedStack: +stack: +constants: +body: | + bb.0: + liveins: $rdi + + ; CHECK-LABEL: name: select_freeze_scalar_s64 + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; CHECK-NOT: G_FREEZE + ; CHECK-NEXT: $rax = COPY [[COPY]] + ; CHECK-NEXT: RET 0, implicit $rax + %0:gpr(s64) = COPY $rdi + %1:gpr(s64) = G_FREEZE %0 + $rax = COPY %1(s64) + RET 0, implicit $rax +... + + +--- +name: select_freeze_vec128 +legalized: true +regBankSelected: true +registers: + - { id: 0, class: vecr, preferred-register: '', flags: [ ] } + - { id: 1, class: vecr, preferred-register: '', flags: [ ] } +body: | + bb.0: + liveins: $xmm0 + + ; CHECK-LABEL: name: select_freeze_vec128 + ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0 + ; CHECK-NOT: G_FREEZE + ; CHECK-NEXT: $xmm1 = COPY [[COPY]] + ; CHECK-NEXT: RET 0, implicit $xmm1 + %0:vecr(<4 x s32>) = COPY $xmm0 + %1:vecr(<4 x s32>) = G_FREEZE %0 + $xmm1 = COPY %1(<4 x s32>) + RET 0, implicit $xmm1 +... 
diff --git a/llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll b/llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll index d0e7c1c2..e1e1611 100644 --- a/llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll +++ b/llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll @@ -80,8 +80,8 @@ cleanup2: ; CHECK: cleanup2.corodispatch: ; CHECK: %1 = phi i8 [ 0, %handler2 ], [ 1, %catch.dispatch.2 ] ; CHECK: %2 = cleanuppad within %h1 [] -; CHECK: %switch = icmp ult i8 %1, 1 -; CHECK: br i1 %switch, label %cleanup2.from.handler2, label %cleanup2.from.catch.dispatch.2 +; CHECK: %3 = icmp eq i8 %1, 0 +; CHECK: br i1 %3, label %cleanup2.from.handler2, label %cleanup2.from.catch.dispatch.2 ; CHECK: cleanup2.from.handler2: ; CHECK: %valueB.reload = load i32, ptr %valueB.spill.addr, align 4 diff --git a/llvm/test/Transforms/SCCP/relax-range-checks.ll b/llvm/test/Transforms/SCCP/relax-range-checks.ll index 90722f3..34e4813 100644 --- a/llvm/test/Transforms/SCCP/relax-range-checks.ll +++ b/llvm/test/Transforms/SCCP/relax-range-checks.ll @@ -89,4 +89,28 @@ define i1 @relax_range_check_multiuse(i8 range(i8 0, 5) %x) { ret i1 %ret } +define i1 @range_check_to_icmp_eq1(i32 range(i32 0, 4) %x) { +; CHECK-LABEL: define i1 @range_check_to_icmp_eq1( +; CHECK-SAME: i32 range(i32 0, 4) [[X:%.*]]) { +; CHECK-NEXT: [[OFF:%.*]] = add nsw i32 [[X]], -3 +; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 3 +; CHECK-NEXT: ret i1 [[TMP1]] +; + %off = add nsw i32 %x, -3 + %cmp = icmp ult i32 %off, 2 + ret i1 %cmp +} + +define i1 @range_check_to_icmp_eq2(i32 range(i32 -1, 2) %x) { +; CHECK-LABEL: define i1 @range_check_to_icmp_eq2( +; CHECK-SAME: i32 range(i32 -1, 2) [[X:%.*]]) { +; CHECK-NEXT: [[OFF:%.*]] = add nsw i32 [[X]], -1 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], 1 +; CHECK-NEXT: ret i1 [[CMP]] +; + %off = add nsw i32 %x, -1 + %cmp = icmp ult i32 %off, -2 + ret i1 %cmp +} + declare void @use(i8) diff --git a/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll b/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll index 4a457cc..a0e29dd 100644 --- a/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll @@ -7,8 +7,7 @@ declare void @foo(i32) define void @test(i1 %a) { ; CHECK-LABEL: define void @test( ; CHECK-SAME: i1 [[A:%.*]]) { -; CHECK-NEXT: [[A_OFF:%.*]] = add i1 [[A]], true -; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i1 [[A_OFF]], true +; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i1 [[A]], true ; CHECK-NEXT: br i1 [[SWITCH]], label [[TRUE:%.*]], label [[FALSE:%.*]] ; CHECK: common.ret: ; CHECK-NEXT: ret void @@ -209,8 +208,7 @@ define void @test5(i8 %a) { ; CHECK-SAME: i8 [[A:%.*]]) { ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[A]], 2 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) -; CHECK-NEXT: [[A_OFF:%.*]] = add i8 [[A]], -1 -; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[A_OFF]], 1 +; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i8 [[A]], 1 ; CHECK-NEXT: br i1 [[SWITCH]], label [[TRUE:%.*]], label [[FALSE:%.*]] ; CHECK: common.ret: ; CHECK-NEXT: ret void @@ -243,8 +241,7 @@ define void @test6(i8 %a) { ; CHECK-NEXT: [[AND:%.*]] = and i8 [[A]], -2 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[AND]], -2 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) -; CHECK-NEXT: [[A_OFF:%.*]] = add i8 [[A]], 1 -; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[A_OFF]], 1 +; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i8 [[A]], -1 ; CHECK-NEXT: br i1 [[SWITCH]], label [[TRUE:%.*]], label [[FALSE:%.*]] ; CHECK: common.ret: ; CHECK-NEXT: ret void @@ 
-279,8 +276,7 @@ define void @test7(i8 %a) { ; CHECK-NEXT: [[AND:%.*]] = and i8 [[A]], -2 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[AND]], -2 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) -; CHECK-NEXT: [[A_OFF:%.*]] = add i8 [[A]], 1 -; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[A_OFF]], 1 +; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i8 [[A]], -1 ; CHECK-NEXT: br i1 [[SWITCH]], label [[TRUE:%.*]], label [[FALSE:%.*]] ; CHECK: common.ret: ; CHECK-NEXT: ret void diff --git a/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll b/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll index 8f2ae2d..0fc3c19 100644 --- a/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll @@ -188,4 +188,217 @@ exit: ret void } +define i32 @wrapping_known_range(i8 range(i8 0, 6) %arg) { +; CHECK-LABEL: @wrapping_known_range( +; CHECK-NEXT: [[ARG_OFF:%.*]] = add i8 [[ARG:%.*]], -1 +; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[ARG_OFF]], 3 +; CHECK-NEXT: br i1 [[SWITCH]], label [[ELSE:%.*]], label [[IF:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[I0:%.*]], [[IF]] ], [ [[I1:%.*]], [[ELSE]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] +; CHECK: if: +; CHECK-NEXT: [[I0]] = call i32 @f(i32 0) +; CHECK-NEXT: br label [[COMMON_RET:%.*]] +; CHECK: else: +; CHECK-NEXT: [[I1]] = call i32 @f(i32 1) +; CHECK-NEXT: br label [[COMMON_RET]] +; + switch i8 %arg, label %else [ + i8 0, label %if + i8 4, label %if + i8 5, label %if + ] + +if: + %i0 = call i32 @f(i32 0) + ret i32 %i0 + +else: + %i1 = call i32 @f(i32 1) + ret i32 %i1 +} + +define i32 @wrapping_known_range_2(i8 range(i8 0, 6) %arg) { +; CHECK-LABEL: @wrapping_known_range_2( +; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i8 [[ARG:%.*]], 1 +; CHECK-NEXT: br i1 [[SWITCH]], label [[ELSE:%.*]], label [[IF:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[I0:%.*]], [[IF]] ], [ [[I1:%.*]], [[ELSE]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] +; CHECK: if: +; CHECK-NEXT: [[I0]] = call i32 @f(i32 0) +; CHECK-NEXT: br label [[COMMON_RET:%.*]] +; CHECK: else: +; CHECK-NEXT: [[I1]] = call i32 @f(i32 1) +; CHECK-NEXT: br label [[COMMON_RET]] +; + switch i8 %arg, label %else [ + i8 0, label %if + i8 2, label %if + i8 3, label %if + i8 4, label %if + i8 5, label %if + ] + +if: + %i0 = call i32 @f(i32 0) + ret i32 %i0 + +else: + %i1 = call i32 @f(i32 1) + ret i32 %i1 +} + +define i32 @wrapping_range(i8 %arg) { +; CHECK-LABEL: @wrapping_range( +; CHECK-NEXT: [[ARG_OFF:%.*]] = add i8 [[ARG:%.*]], -1 +; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[ARG_OFF]], -4 +; CHECK-NEXT: br i1 [[SWITCH]], label [[ELSE:%.*]], label [[IF:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[I0:%.*]], [[IF]] ], [ [[I1:%.*]], [[ELSE]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] +; CHECK: if: +; CHECK-NEXT: [[I0]] = call i32 @f(i32 0) +; CHECK-NEXT: br label [[COMMON_RET:%.*]] +; CHECK: else: +; CHECK-NEXT: [[I1]] = call i32 @f(i32 1) +; CHECK-NEXT: br label [[COMMON_RET]] +; + switch i8 %arg, label %else [ + i8 0, label %if + i8 -3, label %if + i8 -2, label %if + i8 -1, label %if + ] + +if: + %i0 = call i32 @f(i32 0) + ret i32 %i0 + +else: + %i1 = call i32 @f(i32 1) + ret i32 %i1 +} + +define i8 @wrapping_range_phi(i8 %arg) { +; CHECK-LABEL: @wrapping_range_phi( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[ARG_OFF:%.*]] = add i8 [[ARG:%.*]], -1 +; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[ARG_OFF]], -2 +; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[SWITCH]], i8 
0, i8 1 +; CHECK-NEXT: ret i8 [[SPEC_SELECT]] +; +entry: + switch i8 %arg, label %else [ + i8 0, label %if + i8 -1, label %if + ] + +if: + %i = phi i8 [ 0, %else ], [ 1, %entry ], [ 1, %entry ] + ret i8 %i + +else: + br label %if +} + +define i32 @no_continuous_wrapping_range(i8 %arg) { +; CHECK-LABEL: @no_continuous_wrapping_range( +; CHECK-NEXT: switch i8 [[ARG:%.*]], label [[ELSE:%.*]] [ +; CHECK-NEXT: i8 0, label [[IF:%.*]] +; CHECK-NEXT: i8 -3, label [[IF]] +; CHECK-NEXT: i8 -1, label [[IF]] +; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[I0:%.*]], [[IF]] ], [ [[I1:%.*]], [[ELSE]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] +; CHECK: if: +; CHECK-NEXT: [[I0]] = call i32 @f(i32 0) +; CHECK-NEXT: br label [[COMMON_RET:%.*]] +; CHECK: else: +; CHECK-NEXT: [[I1]] = call i32 @f(i32 1) +; CHECK-NEXT: br label [[COMMON_RET]] +; + switch i8 %arg, label %else [ + i8 0, label %if + i8 -3, label %if + i8 -1, label %if + ] + +if: + %i0 = call i32 @f(i32 0) + ret i32 %i0 + +else: + %i1 = call i32 @f(i32 1) + ret i32 %i1 +} + +define i32 @one_case_1(i32 %x) { +; CHECK-LABEL: @one_case_1( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i32 [[X:%.*]], 10 +; CHECK-NEXT: br i1 [[SWITCH]], label [[A:%.*]], label [[B:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[TMP0:%.*]], [[B]] ], [ [[TMP1:%.*]], [[A]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] +; CHECK: a: +; CHECK-NEXT: [[TMP0]] = call i32 @f(i32 0) +; CHECK-NEXT: br label [[COMMON_RET:%.*]] +; CHECK: b: +; CHECK-NEXT: [[TMP1]] = call i32 @f(i32 1) +; CHECK-NEXT: br label [[COMMON_RET]] +; +entry: + switch i32 %x, label %unreachable [ + i32 5, label %a + i32 6, label %a + i32 7, label %a + i32 10, label %b + ] + +unreachable: + unreachable +a: + %0 = call i32 @f(i32 0) + ret i32 %0 +b: + %1 = call i32 @f(i32 1) + ret i32 %1 +} + +define i32 @one_case_2(i32 %x) { +; CHECK-LABEL: @one_case_2( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i32 [[X:%.*]], 5 +; CHECK-NEXT: br i1 [[SWITCH]], label [[A:%.*]], label [[B:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[TMP0:%.*]], [[A]] ], [ [[TMP1:%.*]], [[B]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] +; CHECK: a: +; CHECK-NEXT: [[TMP0]] = call i32 @f(i32 0) +; CHECK-NEXT: br label [[COMMON_RET:%.*]] +; CHECK: b: +; CHECK-NEXT: [[TMP1]] = call i32 @f(i32 1) +; CHECK-NEXT: br label [[COMMON_RET]] +; +entry: + switch i32 %x, label %unreachable [ + i32 5, label %a + i32 10, label %b + i32 11, label %b + i32 12, label %b + i32 13, label %b + ] + +unreachable: + unreachable +a: + %0 = call i32 @f(i32 0) + ret i32 %0 +b: + %1 = call i32 @f(i32 1) + ret i32 %1 +} + declare void @bar(ptr nonnull dereferenceable(4)) diff --git a/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s b/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s new file mode 100644 index 0000000..65e1203 --- /dev/null +++ b/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s @@ -0,0 +1,8 @@ +REQUIRES: aarch64-registered-target + +RUN: llvm-exegesis -mtriple=aarch64 -mcpu=neoverse-v2 -mode=latency --dump-object-to-disk=%d --opcode-name=FMOVWSr --benchmark-phase=assemble-measured-code 2>&1 +RUN: llvm-objdump -d %d > %t.s +RUN: FileCheck %s < %t.s + +CHECK-NOT: ld{{[1-4]}} +CHECK-NOT: st{{[1-4]}} diff --git a/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2.s b/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2.s index d777d31..8e0d47e 100644 --- 
a/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2.s +++ b/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2.s @@ -153,12 +153,12 @@ vpshrdw $1, (%rax), %zmm17, %zmm19 {k1}{z} # CHECK-NEXT: 2 8 1.00 * vpcompressw %zmm16, (%rax) {%k1} # CHECK-NEXT: 1 1 1.00 vpcompressw %zmm16, %zmm19 {%k1} {z} # CHECK-NEXT: 1 1 1.00 U vpexpandb %zmm16, %zmm19 -# CHECK-NEXT: 2 8 1.00 U vpexpandb (%rax), %zmm19 +# CHECK-NEXT: 2 8 1.00 * U vpexpandb (%rax), %zmm19 # CHECK-NEXT: 1 1 1.00 vpexpandb %zmm16, %zmm19 {%k1} # CHECK-NEXT: 2 8 1.00 * vpexpandb (%rax), %zmm19 {%k1} # CHECK-NEXT: 1 1 1.00 vpexpandb %zmm16, %zmm19 {%k1} {z} # CHECK-NEXT: 1 1 1.00 U vpexpandw %zmm16, %zmm19 -# CHECK-NEXT: 2 8 1.00 U vpexpandw (%rax), %zmm19 +# CHECK-NEXT: 2 8 1.00 * U vpexpandw (%rax), %zmm19 # CHECK-NEXT: 1 1 1.00 vpexpandw %zmm16, %zmm19 {%k1} # CHECK-NEXT: 2 8 1.00 * vpexpandw (%rax), %zmm19 {%k1} # CHECK-NEXT: 1 1 1.00 vpexpandw %zmm16, %zmm19 {%k1} {z} diff --git a/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2vl.s b/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2vl.s index 99b88fe..f6be964 100644 --- a/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2vl.s +++ b/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2vl.s @@ -295,22 +295,22 @@ vpshrdw $1, (%rax), %ymm17, %ymm19 {k1}{z} # CHECK-NEXT: 2 8 1.00 * vpcompressw %ymm16, (%rax) {%k1} # CHECK-NEXT: 1 1 1.00 vpcompressw %ymm16, %ymm19 {%k1} {z} # CHECK-NEXT: 1 1 1.00 U vpexpandb %xmm16, %xmm19 -# CHECK-NEXT: 2 8 1.00 U vpexpandb (%rax), %xmm19 +# CHECK-NEXT: 2 8 1.00 * U vpexpandb (%rax), %xmm19 # CHECK-NEXT: 1 1 1.00 vpexpandb %xmm16, %xmm19 {%k1} # CHECK-NEXT: 2 8 1.00 * vpexpandb (%rax), %xmm19 {%k1} # CHECK-NEXT: 1 1 1.00 vpexpandb %xmm16, %xmm19 {%k1} {z} # CHECK-NEXT: 1 1 1.00 U vpexpandb %ymm16, %ymm19 -# CHECK-NEXT: 2 8 1.00 U vpexpandb (%rax), %ymm19 +# CHECK-NEXT: 2 8 1.00 * U vpexpandb (%rax), %ymm19 # CHECK-NEXT: 1 1 1.00 vpexpandb %ymm16, %ymm19 {%k1} # CHECK-NEXT: 2 8 1.00 * vpexpandb (%rax), %ymm19 {%k1} # CHECK-NEXT: 1 1 1.00 vpexpandb %ymm16, %ymm19 {%k1} {z} # CHECK-NEXT: 1 1 1.00 U vpexpandw %xmm16, %xmm19 -# CHECK-NEXT: 2 8 1.00 U vpexpandw (%rax), %xmm19 +# CHECK-NEXT: 2 8 1.00 * U vpexpandw (%rax), %xmm19 # CHECK-NEXT: 1 1 1.00 vpexpandw %xmm16, %xmm19 {%k1} # CHECK-NEXT: 2 8 1.00 * vpexpandw (%rax), %xmm19 {%k1} # CHECK-NEXT: 1 1 1.00 vpexpandw %xmm16, %xmm19 {%k1} {z} # CHECK-NEXT: 1 1 1.00 U vpexpandw %ymm16, %ymm19 -# CHECK-NEXT: 2 8 1.00 U vpexpandw (%rax), %ymm19 +# CHECK-NEXT: 2 8 1.00 * U vpexpandw (%rax), %ymm19 # CHECK-NEXT: 1 1 1.00 vpexpandw %ymm16, %ymm19 {%k1} # CHECK-NEXT: 2 8 1.00 * vpexpandw (%rax), %ymm19 {%k1} # CHECK-NEXT: 1 1 1.00 vpexpandw %ymm16, %ymm19 {%k1} {z} diff --git a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2.s b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2.s index 08f07dc..5c987ee 100644 --- a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2.s +++ b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2.s @@ -153,12 +153,12 @@ vpshrdw $1, (%rax), %zmm17, %zmm19 {k1}{z} # CHECK-NEXT: 2 10 1.00 * vpcompressw %zmm16, (%rax) {%k1} # CHECK-NEXT: 1 3 1.00 vpcompressw %zmm16, %zmm19 {%k1} {z} # CHECK-NEXT: 1 3 1.00 U vpexpandb %zmm16, %zmm19 -# CHECK-NEXT: 2 10 1.00 U vpexpandb (%rax), %zmm19 +# CHECK-NEXT: 2 10 1.00 * U vpexpandb (%rax), %zmm19 # CHECK-NEXT: 1 3 1.00 vpexpandb %zmm16, %zmm19 {%k1} # CHECK-NEXT: 2 10 1.00 * vpexpandb (%rax), %zmm19 {%k1} # CHECK-NEXT: 1 3 1.00 vpexpandb %zmm16, 
%zmm19 {%k1} {z} # CHECK-NEXT: 1 3 1.00 U vpexpandw %zmm16, %zmm19 -# CHECK-NEXT: 2 10 1.00 U vpexpandw (%rax), %zmm19 +# CHECK-NEXT: 2 10 1.00 * U vpexpandw (%rax), %zmm19 # CHECK-NEXT: 1 3 1.00 vpexpandw %zmm16, %zmm19 {%k1} # CHECK-NEXT: 2 10 1.00 * vpexpandw (%rax), %zmm19 {%k1} # CHECK-NEXT: 1 3 1.00 vpexpandw %zmm16, %zmm19 {%k1} {z} diff --git a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2vl.s b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2vl.s index 0194303..023026b 100644 --- a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2vl.s +++ b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2vl.s @@ -295,22 +295,22 @@ vpshrdw $1, (%rax), %ymm17, %ymm19 {k1}{z} # CHECK-NEXT: 2 10 1.00 * vpcompressw %ymm16, (%rax) {%k1} # CHECK-NEXT: 1 3 1.00 vpcompressw %ymm16, %ymm19 {%k1} {z} # CHECK-NEXT: 1 3 1.00 U vpexpandb %xmm16, %xmm19 -# CHECK-NEXT: 2 10 1.00 U vpexpandb (%rax), %xmm19 +# CHECK-NEXT: 2 10 1.00 * U vpexpandb (%rax), %xmm19 # CHECK-NEXT: 1 3 1.00 vpexpandb %xmm16, %xmm19 {%k1} # CHECK-NEXT: 2 10 1.00 * vpexpandb (%rax), %xmm19 {%k1} # CHECK-NEXT: 1 3 1.00 vpexpandb %xmm16, %xmm19 {%k1} {z} # CHECK-NEXT: 1 3 1.00 U vpexpandb %ymm16, %ymm19 -# CHECK-NEXT: 2 10 1.00 U vpexpandb (%rax), %ymm19 +# CHECK-NEXT: 2 10 1.00 * U vpexpandb (%rax), %ymm19 # CHECK-NEXT: 1 3 1.00 vpexpandb %ymm16, %ymm19 {%k1} # CHECK-NEXT: 2 10 1.00 * vpexpandb (%rax), %ymm19 {%k1} # CHECK-NEXT: 1 3 1.00 vpexpandb %ymm16, %ymm19 {%k1} {z} # CHECK-NEXT: 1 3 1.00 U vpexpandw %xmm16, %xmm19 -# CHECK-NEXT: 2 10 1.00 U vpexpandw (%rax), %xmm19 +# CHECK-NEXT: 2 10 1.00 * U vpexpandw (%rax), %xmm19 # CHECK-NEXT: 1 3 1.00 vpexpandw %xmm16, %xmm19 {%k1} # CHECK-NEXT: 2 10 1.00 * vpexpandw (%rax), %xmm19 {%k1} # CHECK-NEXT: 1 3 1.00 vpexpandw %xmm16, %xmm19 {%k1} {z} # CHECK-NEXT: 1 3 1.00 U vpexpandw %ymm16, %ymm19 -# CHECK-NEXT: 2 10 1.00 U vpexpandw (%rax), %ymm19 +# CHECK-NEXT: 2 10 1.00 * U vpexpandw (%rax), %ymm19 # CHECK-NEXT: 1 3 1.00 vpexpandw %ymm16, %ymm19 {%k1} # CHECK-NEXT: 2 10 1.00 * vpexpandw (%rax), %ymm19 {%k1} # CHECK-NEXT: 1 3 1.00 vpexpandw %ymm16, %ymm19 {%k1} {z} diff --git a/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2.s b/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2.s index ed8a417..db1f9af 100644 --- a/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2.s +++ b/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2.s @@ -153,12 +153,12 @@ vpshrdw $1, (%rax), %zmm17, %zmm19 {k1}{z} # CHECK-NEXT: 6 14 2.00 * vpcompressw %zmm16, (%rax) {%k1} # CHECK-NEXT: 2 6 2.00 vpcompressw %zmm16, %zmm19 {%k1} {z} # CHECK-NEXT: 2 3 2.00 U vpexpandb %zmm16, %zmm19 -# CHECK-NEXT: 3 11 2.00 U vpexpandb (%rax), %zmm19 +# CHECK-NEXT: 3 11 2.00 * U vpexpandb (%rax), %zmm19 # CHECK-NEXT: 2 8 2.00 vpexpandb %zmm16, %zmm19 {%k1} # CHECK-NEXT: 3 13 2.00 * vpexpandb (%rax), %zmm19 {%k1} # CHECK-NEXT: 2 8 2.00 vpexpandb %zmm16, %zmm19 {%k1} {z} # CHECK-NEXT: 2 3 2.00 U vpexpandw %zmm16, %zmm19 -# CHECK-NEXT: 3 11 2.00 U vpexpandw (%rax), %zmm19 +# CHECK-NEXT: 3 11 2.00 * U vpexpandw (%rax), %zmm19 # CHECK-NEXT: 2 8 2.00 vpexpandw %zmm16, %zmm19 {%k1} # CHECK-NEXT: 3 13 2.00 * vpexpandw (%rax), %zmm19 {%k1} # CHECK-NEXT: 2 8 2.00 vpexpandw %zmm16, %zmm19 {%k1} {z} diff --git a/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2vl.s b/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2vl.s index 3db09bc..9277a91 100644 --- 
a/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2vl.s +++ b/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2vl.s @@ -295,22 +295,22 @@ vpshrdw $1, (%rax), %ymm17, %ymm19 {k1}{z} # CHECK-NEXT: 6 14 2.00 * vpcompressw %ymm16, (%rax) {%k1} # CHECK-NEXT: 2 6 2.00 vpcompressw %ymm16, %ymm19 {%k1} {z} # CHECK-NEXT: 2 3 2.00 U vpexpandb %xmm16, %xmm19 -# CHECK-NEXT: 3 10 2.00 U vpexpandb (%rax), %xmm19 +# CHECK-NEXT: 3 10 2.00 * U vpexpandb (%rax), %xmm19 # CHECK-NEXT: 2 8 2.00 vpexpandb %xmm16, %xmm19 {%k1} # CHECK-NEXT: 3 13 2.00 * vpexpandb (%rax), %xmm19 {%k1} # CHECK-NEXT: 2 8 2.00 vpexpandb %xmm16, %xmm19 {%k1} {z} # CHECK-NEXT: 2 3 2.00 U vpexpandb %ymm16, %ymm19 -# CHECK-NEXT: 3 11 2.00 U vpexpandb (%rax), %ymm19 +# CHECK-NEXT: 3 11 2.00 * U vpexpandb (%rax), %ymm19 # CHECK-NEXT: 2 8 2.00 vpexpandb %ymm16, %ymm19 {%k1} # CHECK-NEXT: 3 13 2.00 * vpexpandb (%rax), %ymm19 {%k1} # CHECK-NEXT: 2 8 2.00 vpexpandb %ymm16, %ymm19 {%k1} {z} # CHECK-NEXT: 2 3 2.00 U vpexpandw %xmm16, %xmm19 -# CHECK-NEXT: 3 10 2.00 U vpexpandw (%rax), %xmm19 +# CHECK-NEXT: 3 10 2.00 * U vpexpandw (%rax), %xmm19 # CHECK-NEXT: 2 8 2.00 vpexpandw %xmm16, %xmm19 {%k1} # CHECK-NEXT: 3 13 2.00 * vpexpandw (%rax), %xmm19 {%k1} # CHECK-NEXT: 2 8 2.00 vpexpandw %xmm16, %xmm19 {%k1} {z} # CHECK-NEXT: 2 3 2.00 U vpexpandw %ymm16, %ymm19 -# CHECK-NEXT: 3 11 2.00 U vpexpandw (%rax), %ymm19 +# CHECK-NEXT: 3 11 2.00 * U vpexpandw (%rax), %ymm19 # CHECK-NEXT: 2 8 2.00 vpexpandw %ymm16, %ymm19 {%k1} # CHECK-NEXT: 3 13 2.00 * vpexpandw (%rax), %ymm19 {%k1} # CHECK-NEXT: 2 8 2.00 vpexpandw %ymm16, %ymm19 {%k1} {z} diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2.s b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2.s index 594518d..88e140d 100644 --- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2.s +++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2.s @@ -153,12 +153,12 @@ vpshrdw $1, (%rax), %zmm17, %zmm19 {k1}{z} # CHECK-NEXT: 2 8 0.50 * vpcompressw %zmm16, (%rax) {%k1} # CHECK-NEXT: 1 5 1.00 vpcompressw %zmm16, %zmm19 {%k1} {z} # CHECK-NEXT: 1 5 1.00 U vpexpandb %zmm16, %zmm19 -# CHECK-NEXT: 2 8 0.50 U vpexpandb (%rax), %zmm19 +# CHECK-NEXT: 2 8 0.50 * U vpexpandb (%rax), %zmm19 # CHECK-NEXT: 1 5 1.00 vpexpandb %zmm16, %zmm19 {%k1} # CHECK-NEXT: 2 8 0.50 * vpexpandb (%rax), %zmm19 {%k1} # CHECK-NEXT: 1 5 1.00 vpexpandb %zmm16, %zmm19 {%k1} {z} # CHECK-NEXT: 1 5 1.00 U vpexpandw %zmm16, %zmm19 -# CHECK-NEXT: 2 8 0.50 U vpexpandw (%rax), %zmm19 +# CHECK-NEXT: 2 8 0.50 * U vpexpandw (%rax), %zmm19 # CHECK-NEXT: 1 5 1.00 vpexpandw %zmm16, %zmm19 {%k1} # CHECK-NEXT: 2 8 0.50 * vpexpandw (%rax), %zmm19 {%k1} # CHECK-NEXT: 1 5 1.00 vpexpandw %zmm16, %zmm19 {%k1} {z} diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2vl.s b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2vl.s index 7b9c2516..325835a 100644 --- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2vl.s +++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2vl.s @@ -295,22 +295,22 @@ vpshrdw $1, (%rax), %ymm17, %ymm19 {k1}{z} # CHECK-NEXT: 2 8 0.50 * vpcompressw %ymm16, (%rax) {%k1} # CHECK-NEXT: 1 4 1.00 vpcompressw %ymm16, %ymm19 {%k1} {z} # CHECK-NEXT: 2 1 0.50 U vpexpandb %xmm16, %xmm19 -# CHECK-NEXT: 2 8 0.50 U vpexpandb (%rax), %xmm19 +# CHECK-NEXT: 2 8 0.50 * U vpexpandb (%rax), %xmm19 # CHECK-NEXT: 2 1 0.50 vpexpandb %xmm16, %xmm19 {%k1} # CHECK-NEXT: 2 8 0.50 * vpexpandb (%rax), %xmm19 {%k1} # CHECK-NEXT: 2 1 0.50 vpexpandb %xmm16, 
%xmm19 {%k1} {z} # CHECK-NEXT: 1 4 1.00 U vpexpandb %ymm16, %ymm19 -# CHECK-NEXT: 2 8 0.50 U vpexpandb (%rax), %ymm19 +# CHECK-NEXT: 2 8 0.50 * U vpexpandb (%rax), %ymm19 # CHECK-NEXT: 1 4 1.00 vpexpandb %ymm16, %ymm19 {%k1} # CHECK-NEXT: 2 8 0.50 * vpexpandb (%rax), %ymm19 {%k1} # CHECK-NEXT: 1 4 1.00 vpexpandb %ymm16, %ymm19 {%k1} {z} # CHECK-NEXT: 2 1 0.50 U vpexpandw %xmm16, %xmm19 -# CHECK-NEXT: 2 8 0.50 U vpexpandw (%rax), %xmm19 +# CHECK-NEXT: 2 8 0.50 * U vpexpandw (%rax), %xmm19 # CHECK-NEXT: 2 1 0.50 vpexpandw %xmm16, %xmm19 {%k1} # CHECK-NEXT: 2 8 0.50 * vpexpandw (%rax), %xmm19 {%k1} # CHECK-NEXT: 2 1 0.50 vpexpandw %xmm16, %xmm19 {%k1} {z} # CHECK-NEXT: 1 4 1.00 U vpexpandw %ymm16, %ymm19 -# CHECK-NEXT: 2 8 0.50 U vpexpandw (%rax), %ymm19 +# CHECK-NEXT: 2 8 0.50 * U vpexpandw (%rax), %ymm19 # CHECK-NEXT: 1 4 1.00 vpexpandw %ymm16, %ymm19 {%k1} # CHECK-NEXT: 2 8 0.50 * vpexpandw (%rax), %ymm19 {%k1} # CHECK-NEXT: 1 4 1.00 vpexpandw %ymm16, %ymm19 {%k1} {z} diff --git a/llvm/tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp b/llvm/tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp index bdfc93e..707e6ee 100644 --- a/llvm/tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp +++ b/llvm/tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp @@ -57,6 +57,12 @@ computeAliasingInstructions(const LLVMState &State, const Instruction *Instr, continue; if (OtherInstr.hasMemoryOperands()) continue; + // Filtering out loads/stores might belong in hasMemoryOperands(), but that + // complicates things as there are instructions with may load/store that + // don't have operands (e.g. X86's CLUI instruction). So, it's easier to + // filter them out here. + if (OtherInstr.Description.mayLoad() || OtherInstr.Description.mayStore()) + continue; if (!ET.allowAsBackToBack(OtherInstr)) continue; if (Instr->hasAliasingRegistersThrough(OtherInstr, ForbiddenRegisters)) diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h index a2409f2..67ac487 100644 --- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h +++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h @@ -131,8 +131,8 @@ struct BufferResultsToOutParamsOpts { /// Allocator function: Generate a memref allocation with the given type. /// Since `promoteBufferResultsToOutParams` doesn't allow dynamically shaped /// results, we don't allow passing a range of values for dynamic dims. - using AllocationFn = - std::function<FailureOr<Value>(OpBuilder &, Location, MemRefType)>; + using AllocationFn = std::function<FailureOr<Value>(OpBuilder &, Location, + MemRefType, ValueRange)>; /// Memcpy function: Generate a memcpy between two memrefs. using MemCpyFn = @@ -147,8 +147,9 @@ struct BufferResultsToOutParamsOpts { /// Allocation function; used to allocate a memref. /// Default memref.alloc is used AllocationFn allocationFn = [](OpBuilder &builder, Location loc, - MemRefType type) { - return memref::AllocOp::create(builder, loc, type).getResult(); + MemRefType type, ValueRange dynamicSizes) { + return memref::AllocOp::create(builder, loc, type, dynamicSizes) + .getResult(); }; /// Memcpy function; used to create a copy between two memrefs. @@ -166,6 +167,10 @@ struct BufferResultsToOutParamsOpts { /// If true, the pass eliminates the memref.alloc and memcpy if the returned /// memref is allocated in the current function. 
bool hoistStaticAllocs = false; + + /// If true, the pass eliminates the memref.alloc and memcpy if the returned + /// memref is allocated in the current function and has dynamic shape. + bool hoistDynamicAllocs = false; }; /// Replace buffers that are returned from a function with an out parameter. diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td index a0d113c..cad44cb 100644 --- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td +++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td @@ -256,6 +256,8 @@ def BufferResultsToOutParamsPass "Add the attribute 'bufferize.result' to all output parameters.">, Option<"hoistStaticAllocs", "hoist-static-allocs", "bool", /*default=*/"false", "Hoist static allocations to call sites.">, + Option<"hoistDynamicAllocs", "hoist-dynamic-allocs", "bool", + /*default=*/"false", "Hoist dynamic allocations to call sites.">, ]; let dependentDialects = ["memref::MemRefDialect"]; } diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td index f56c1e5..f8e3167 100644 --- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td +++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td @@ -2160,8 +2160,9 @@ class NVVM_MMA_SUPPORTED<list<WMMA_REGS> frags, string layout_a, string layout_b !or(!ne(a_type, b_type), !ne(c_type, d_type))): false, - // m16n8k8 requires C and D to be the same type. - !and(!eq(geom, "m16n8k8"), + // m16n8k16/m16n8k32 requires C and D to be the same type + !and(!or(!eq(geom, "m16n8k16"), + !eq(geom, "m16n8k32")), !ne(c_type, d_type)): false, // All other are OK. diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp index e30e094..25f941d 100644 --- a/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp @@ -23,6 +23,8 @@ namespace bufferization { using namespace mlir; using AllocationFn = bufferization::BufferResultsToOutParamsOpts::AllocationFn; using MemCpyFn = bufferization::BufferResultsToOutParamsOpts::MemCpyFn; +using AllocDynamicSizesMap = + llvm::DenseMap<func::FuncOp, SmallVector<SmallVector<Value>>>; /// Return `true` if the given MemRef type has a fully dynamic layout. static bool hasFullyDynamicLayoutMap(MemRefType type) { @@ -43,6 +45,50 @@ static bool hasStaticIdentityLayout(MemRefType type) { return type.getLayout().isIdentity(); } +/// Return the dynamic shapes of the `memref` based on the defining op. If the +/// complete dynamic shape fails to be captured, return an empty value. +/// Currently, only function block arguments are supported for capturing. +static SmallVector<Value> getDynamicSize(Value memref, func::FuncOp funcOp) { + Operation *defOp = memref.getDefiningOp(); + if (!defOp) + return {}; + auto operands = defOp->getOperands(); + SmallVector<Value> dynamicSizes; + for (Value size : operands) { + if (!isa<IndexType>(size.getType())) + continue; + + BlockArgument sizeSrc = dyn_cast<BlockArgument>(size); + if (!sizeSrc) + return {}; + auto arguments = funcOp.getArguments(); + auto iter = llvm::find(arguments, sizeSrc); + if (iter == arguments.end()) + return {}; + dynamicSizes.push_back(*iter); + } + return dynamicSizes; +} + +/// Returns the dynamic sizes at the callee, through the call relationship +/// between the caller and callee. 
+static SmallVector<Value> mapDynamicSizeAtCaller(func::CallOp call, + func::FuncOp callee, + ValueRange dynamicSizes) { + SmallVector<Value> mappedDynamicSizes; + for (Value size : dynamicSizes) { + for (auto [src, dst] : + llvm::zip_first(call.getOperands(), callee.getArguments())) { + if (size != dst) + continue; + mappedDynamicSizes.push_back(src); + } + } + assert(mappedDynamicSizes.size() == dynamicSizes.size() && + "could not find all dynamic sizes"); + return mappedDynamicSizes; +} + // Updates the func op and entry block. // // Any args appended to the entry block are added to `appendedEntryArgs`. @@ -109,6 +155,7 @@ updateFuncOp(func::FuncOp func, // the given out-params. static LogicalResult updateReturnOps(func::FuncOp func, ArrayRef<BlockArgument> appendedEntryArgs, + AllocDynamicSizesMap &map, const bufferization::BufferResultsToOutParamsOpts &options) { auto res = func.walk([&](func::ReturnOp op) { SmallVector<Value, 6> copyIntoOutParams; @@ -120,12 +167,22 @@ updateReturnOps(func::FuncOp func, ArrayRef<BlockArgument> appendedEntryArgs, keepAsReturnOperands.push_back(operand); } OpBuilder builder(op); + SmallVector<SmallVector<Value>> dynamicSizes; for (auto [orig, arg] : llvm::zip(copyIntoOutParams, appendedEntryArgs)) { - if (options.hoistStaticAllocs && + bool hoistStaticAllocs = + options.hoistStaticAllocs && + cast<MemRefType>(orig.getType()).hasStaticShape(); + bool hoistDynamicAllocs = + options.hoistDynamicAllocs && + !cast<MemRefType>(orig.getType()).hasStaticShape(); + if ((hoistStaticAllocs || hoistDynamicAllocs) && isa_and_nonnull<bufferization::AllocationOpInterface>( - orig.getDefiningOp()) && - mlir::cast<MemRefType>(orig.getType()).hasStaticShape()) { + orig.getDefiningOp())) { orig.replaceAllUsesWith(arg); + if (hoistDynamicAllocs) { + SmallVector<Value> dynamicSize = getDynamicSize(orig, func); + dynamicSizes.push_back(dynamicSize); + } orig.getDefiningOp()->erase(); } else { if (failed(options.memCpyFn(builder, op.getLoc(), orig, arg))) @@ -134,6 +191,10 @@ updateReturnOps(func::FuncOp func, ArrayRef<BlockArgument> appendedEntryArgs, } func::ReturnOp::create(builder, op.getLoc(), keepAsReturnOperands); op.erase(); + auto dynamicSizePair = + std::pair<func::FuncOp, SmallVector<SmallVector<Value>>>(func, + dynamicSizes); + map.insert(dynamicSizePair); return WalkResult::advance(); }); return failure(res.wasInterrupted()); @@ -142,7 +203,7 @@ updateReturnOps(func::FuncOp func, ArrayRef<BlockArgument> appendedEntryArgs, // Updates all CallOps in the scope of the given ModuleOp by allocating // temporary buffers for newly introduced out params. static LogicalResult -updateCalls(ModuleOp module, +updateCalls(ModuleOp module, const AllocDynamicSizesMap &map, const bufferization::BufferResultsToOutParamsOpts &options) { bool didFail = false; SymbolTable symtab(module); @@ -166,8 +227,15 @@ updateCalls(ModuleOp module, } SmallVector<Value, 6> outParams; OpBuilder builder(op); + SmallVector<SmallVector<Value>> dynamicSizes = map.lookup(callee); + size_t dynamicSizesIndex = 0; for (Value memref : replaceWithOutParams) { - if (!cast<MemRefType>(memref.getType()).hasStaticShape()) { + SmallVector<Value> dynamicSize = dynamicSizes.size() > dynamicSizesIndex + ? 
dynamicSizes[dynamicSizesIndex] + : SmallVector<Value>(); + bool memrefStaticShape = + cast<MemRefType>(memref.getType()).hasStaticShape(); + if (!memrefStaticShape && dynamicSize.empty()) { op.emitError() << "cannot create out param for dynamically shaped result"; didFail = true; @@ -177,8 +245,15 @@ updateCalls(ModuleOp module, auto allocType = MemRefType::get(memrefType.getShape(), memrefType.getElementType(), AffineMap(), memrefType.getMemorySpace()); + + if (memrefStaticShape) { + dynamicSize = {}; + } else { + ++dynamicSizesIndex; + dynamicSize = mapDynamicSizeAtCaller(op, callee, dynamicSize); + } auto maybeOutParam = - options.allocationFn(builder, op.getLoc(), allocType); + options.allocationFn(builder, op.getLoc(), allocType, dynamicSize); if (failed(maybeOutParam)) { op.emitError() << "failed to create allocation op"; didFail = true; @@ -213,6 +288,9 @@ updateCalls(ModuleOp module, LogicalResult mlir::bufferization::promoteBufferResultsToOutParams( ModuleOp module, const bufferization::BufferResultsToOutParamsOpts &options) { + // It maps the shape source of the dynamic shape memref returned by each + // function. + AllocDynamicSizesMap map; for (auto func : module.getOps<func::FuncOp>()) { if (!options.filterFn(&func)) continue; @@ -222,11 +300,11 @@ LogicalResult mlir::bufferization::promoteBufferResultsToOutParams( return failure(); if (func.isExternal()) continue; - if (failed(updateReturnOps(func, appendedEntryArgs, options))) { + if (failed(updateReturnOps(func, appendedEntryArgs, map, options))) { return failure(); } } - if (failed(updateCalls(module, options))) + if (failed(updateCalls(module, map, options))) return failure(); return success(); } @@ -243,6 +321,8 @@ struct BufferResultsToOutParamsPass options.addResultAttribute = true; if (hoistStaticAllocs) options.hoistStaticAllocs = true; + if (hoistDynamicAllocs) + options.hoistDynamicAllocs = true; if (failed(bufferization::promoteBufferResultsToOutParams(getOperation(), options))) diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp index 682bf8c..e8f8824 100644 --- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp @@ -648,6 +648,9 @@ LogicalResult MmaOp::verify() { expectedB.emplace_back(unitB, multiplicandFragType); allowedShapes.push_back({16, 8, kFactor}); allowedShapes.push_back({16, 8, kFactor * 2}); + + if (resultPtxType() != accumPtxType()) + return emitOpError("ctype does not match dtype"); } // In the M=8 case, there is only 1 possible case per data type. 
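Note on the BufferResultsToOutParams changes above: hoisting dynamically shaped allocations to call sites only works because the dynamic sizes recorded at the callee's return (which must be function block arguments, per getDynamicSize) can be remapped to the caller's operands in the corresponding argument positions, which is what mapDynamicSizeAtCaller does. A minimal Python sketch of that remapping is shown below, with plain strings standing in for MLIR Values; the helper name and sample values are illustrative only.

# Hedged sketch of the callee-to-caller dynamic-size remapping used by the
# hoist-dynamic-allocs support (cf. mapDynamicSizeAtCaller). Strings stand in
# for MLIR Values; illustration only.
def map_dynamic_sizes_at_caller(call_operands, callee_args, dynamic_sizes):
    # Each recorded dynamic size is a callee block argument; replace it with
    # the caller operand passed in the same argument position.
    arg_to_operand = dict(zip(callee_args, call_operands))
    return [arg_to_operand[size] for size in dynamic_sizes]

# Mirrors the single_alloc test further below: @single_alloc allocates
# memref<?xf32> from its %size argument, so the out-param allocation created
# at the call site reuses the caller's own size operand.
print(map_dynamic_sizes_at_caller(
    call_operands=["%caller_size"],   # operands of the func.call
    callee_args=["%size"],            # block arguments of the callee
    dynamic_sizes=["%size"]))         # sizes captured from the memref.alloc
# ['%caller_size']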
diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir index 627abd0..b7ca71a 100644 --- a/mlir/test/Dialect/LLVMIR/invalid.mlir +++ b/mlir/test/Dialect/LLVMIR/invalid.mlir @@ -743,6 +743,36 @@ func.func @nvvm_invalid_mma_8(%a0 : i32, %a1 : i32, // ----- +// f32 return type, f16 accumulate type +llvm.func @nvvm_mma_m16n8k16_f32_f16(%a0 : vector<2xf16>, %a1 : vector<2xf16>, + %a2 : vector<2xf16>, %a3 : vector<2xf16>, + %b0 : vector<2xf16>, %b1 : vector<2xf16>, + %c0 : vector<2xf16>, %c1 : vector<2xf16>) -> !llvm.struct<(f32, f32, f32, f32)> { + // C and D should have the same type according to PTX ISA + // expected-error@+1 {{'nvvm.mma.sync' op ctype does not match dtype}} + %0 = nvvm.mma.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1] C[%c0, %c1] + {layoutA = #nvvm.mma_layout<row>, layoutB = #nvvm.mma_layout<col>, + shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(f32, f32, f32, f32)> + llvm.return %0 : !llvm.struct<(f32, f32, f32, f32)> +} + +// ----- + +// f16 return type, f32 accumulate type +llvm.func @nvvm_mma_m16n8k16_f16_f32(%a0 : vector<2xf16>, %a1 : vector<2xf16>, + %a2 : vector<2xf16>, %a3 : vector<2xf16>, + %b0 : vector<2xf16>, %b1 : vector<2xf16>, + %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)> { + // C and D should have the same type according to PTX ISA + // expected-error@+1 {{'nvvm.mma.sync' op ctype does not match dtype}} + %0 = nvvm.mma.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1] C[%c0, %c1, %c2, %c3] + {layoutA = #nvvm.mma_layout<row>, layoutB = #nvvm.mma_layout<col>, + shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)> + llvm.return %0 : !llvm.struct<(vector<2xf16>, vector<2xf16>)> +} + +// ----- + func.func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr, %f32 : f32) { // expected-error@+1 {{op failed to verify that result #0 and operand #1 have the same type}} %0 = "llvm.atomicrmw"(%f32_ptr, %f32) {bin_op=11, ordering=1} : (!llvm.ptr, f32) -> i32 diff --git a/mlir/test/Dialect/LLVMIR/nvvm.mlir b/mlir/test/Dialect/LLVMIR/nvvm.mlir index 3277e62..0243f5e 100644 --- a/mlir/test/Dialect/LLVMIR/nvvm.mlir +++ b/mlir/test/Dialect/LLVMIR/nvvm.mlir @@ -227,30 +227,6 @@ func.func @nvvm_mma_m16n8k16_f16_f16(%a0 : vector<2xf16>, %a1 : vector<2xf16>, llvm.return %0 : !llvm.struct<(vector<2xf16>, vector<2xf16>)> } -// CHECK-LABEL: @nvvm_mma_m16n8k16_f32_f16 -func.func @nvvm_mma_m16n8k16_f32_f16(%a0 : vector<2xf16>, %a1 : vector<2xf16>, - %a2 : vector<2xf16>, %a3 : vector<2xf16>, - %b0 : vector<2xf16>, %b1 : vector<2xf16>, - %c0 : vector<2xf16>, %c1 : vector<2xf16>) { - // CHECK: nvvm.mma.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}] {layoutA = #nvvm.mma_layout<row>, layoutB = #nvvm.mma_layout<col>, shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(f32, f32, f32, f32)> - %0 = nvvm.mma.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1] C[%c0, %c1] - {layoutA = #nvvm.mma_layout<row>, layoutB = #nvvm.mma_layout<col>, - shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (vector<2xf16>,vector<2xf16>,vector<2xf16>) -> !llvm.struct<(f32, f32, f32, f32)> - llvm.return %0 : !llvm.struct<(f32, f32, f32, f32)> -} - -// CHECK-LABEL: @nvvm_mma_m16n8k16_f16_f32 -func.func @nvvm_mma_m16n8k16_f16_f32(%a0 : vector<2xf16>, %a1 : vector<2xf16>, - %a2 : vector<2xf16>, %a3 : vector<2xf16>, - %b0 : vector<2xf16>, %b1 : 
vector<2xf16>, - %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32) { - // CHECK: nvvm.mma.sync A[{{.*}}, {{.*}}, {{.*}}, {{.*}}] B[{{.*}}, {{.*}}] C[{{.*}}, {{.*}}, {{.*}}, {{.*}}] {layoutA = #nvvm.mma_layout<row>, layoutB = #nvvm.mma_layout<col>, shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)> - %0 = nvvm.mma.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1] C[%c0, %c1, %c2, %c3] - {layoutA = #nvvm.mma_layout<row>, layoutB = #nvvm.mma_layout<col>, - shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)> - llvm.return %0 : !llvm.struct<(vector<2xf16>, vector<2xf16>)> -} - // CHECK-LABEL: @nvvm_mma_m16n8k16_f32_f32 func.func @nvvm_mma_m16n8k16_f32_f32(%a0 : vector<2xf16>, %a1 : vector<2xf16>, %a2 : vector<2xf16>, %a3 : vector<2xf16>, diff --git a/mlir/test/Target/LLVMIR/nvvmir.mlir b/mlir/test/Target/LLVMIR/nvvmir.mlir index 62aeb07..00a479d 100644 --- a/mlir/test/Target/LLVMIR/nvvmir.mlir +++ b/mlir/test/Target/LLVMIR/nvvmir.mlir @@ -302,32 +302,6 @@ llvm.func @nvvm_mma_m16n8k16_bf16_bf16(%a0 : i32, %a1 : i32, %a2 : i32, %a3 : i3 llvm.return %0 : !llvm.struct<(f32, f32, f32, f32)> } -// f32 return type, f16 accumulate type -// CHECK-LABEL: @nvvm_mma_m16n8k16_f32_f16 -llvm.func @nvvm_mma_m16n8k16_f32_f16(%a0 : vector<2xf16>, %a1 : vector<2xf16>, - %a2 : vector<2xf16>, %a3 : vector<2xf16>, - %b0 : vector<2xf16>, %b1 : vector<2xf16>, - %c0 : vector<2xf16>, %c1 : vector<2xf16>) -> !llvm.struct<(f32, f32, f32, f32)> { - // CHECK: call { float, float, float, float } @llvm.nvvm.mma.m16n8k16.row.col.f32.f16 - %0 = nvvm.mma.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1] C[%c0, %c1] - {layoutA = #nvvm.mma_layout<row>, layoutB = #nvvm.mma_layout<col>, - shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (vector<2xf16>, vector<2xf16>, vector<2xf16>) -> !llvm.struct<(f32, f32, f32, f32)> - llvm.return %0 : !llvm.struct<(f32, f32, f32, f32)> -} - -// f16 return type, f32 accumulate type -// CHECK-LABEL: @nvvm_mma_m16n8k16_f16_f32 -llvm.func @nvvm_mma_m16n8k16_f16_f32(%a0 : vector<2xf16>, %a1 : vector<2xf16>, - %a2 : vector<2xf16>, %a3 : vector<2xf16>, - %b0 : vector<2xf16>, %b1 : vector<2xf16>, - %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)> { - // CHECK: call { <2 x half>, <2 x half> } @llvm.nvvm.mma.m16n8k16.row.col.f16.f32 - %0 = nvvm.mma.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1] C[%c0, %c1, %c2, %c3] - {layoutA = #nvvm.mma_layout<row>, layoutB = #nvvm.mma_layout<col>, - shape = #nvvm.shape<m = 16, n = 8, k = 16>} : (vector<2xf16>, vector<2xf16>, f32) -> !llvm.struct<(vector<2xf16>, vector<2xf16>)> - llvm.return %0 : !llvm.struct<(vector<2xf16>, vector<2xf16>)> -} - // f32 return type, f32 accumulate type // CHECK-LABEL: @nvvm_mma_m16n8k16_f32_f32 llvm.func @nvvm_mma_m16n8k16_f32_f32(%a0 : vector<2xf16>, %a1 : vector<2xf16>, diff --git a/mlir/test/Transforms/buffer-results-to-out-params-hosit-dynamic-allocs.mlir b/mlir/test/Transforms/buffer-results-to-out-params-hosit-dynamic-allocs.mlir new file mode 100644 index 0000000..f33eb8e --- /dev/null +++ b/mlir/test/Transforms/buffer-results-to-out-params-hosit-dynamic-allocs.mlir @@ -0,0 +1,79 @@ +// RUN: mlir-opt -allow-unregistered-dialect -p 'builtin.module(buffer-results-to-out-params{hoist-dynamic-allocs})' %s -split-input-file | FileCheck %s + +func.func private @single_alloc(%size : index) -> (memref<?xf32>) { + %alloc = memref.alloc(%size) : memref<?xf32> + return %alloc : 
memref<?xf32> +} + +func.func @single_alloc_test(%size : index) { + %alloc = call @single_alloc(%size) : (index) -> (memref<?xf32>) + "test.sink"(%alloc) : (memref<?xf32>) -> () +} + +// CHECK-LABEL: func.func private @single_alloc( +// CHECK-SAME: %{{.*}}: index, +// CHECK-SAME: %{{.*}}: memref<?xf32>) { + +// CHECK-LABEL: func.func @single_alloc_test( +// CHECK-SAME: %[[size:.*]]: index) { +// CHECK: %[[alloc:.*]] = memref.alloc(%[[size]]) : memref<?xf32> +// CHECK: call @single_alloc(%[[size]], %[[alloc]]) : (index, memref<?xf32>) -> () +// CHECK: "test.sink"(%[[alloc]]) : (memref<?xf32>) -> () +// CHECK: } + +// ----- + +func.func private @mult_alloc(%size0 : index, %size1 : index) -> (memref<?x?xf32>, memref<?xf32>) { + %alloc0 = memref.alloc(%size0, %size1) : memref<?x?xf32> + %alloc1 = memref.alloc(%size1) : memref<?xf32> + return %alloc0, %alloc1 : memref<?x?xf32>, memref<?xf32> +} + +func.func @mult_alloc_test(%size0 : index, %size1: index) { + %alloc0, %alloc1 = call @mult_alloc(%size0, %size1) : (index, index) -> (memref<?x?xf32>, memref<?xf32>) + "test.sink"(%alloc0, %alloc1) : (memref<?x?xf32>, memref<?xf32>) -> () +} + +// CHECK-LABEL: func private @mult_alloc( +// CHECK-SAME: %{{.*}}: index, %{{.*}}: index, +// CHECK-SAME: %{{.*}}: memref<?x?xf32>, %{{.*}}: memref<?xf32>) { + +// CHECK-LABEL: func @mult_alloc_test( +// CHECK-SAME: %[[size0:.*]]: index, +// CHECK-SAME: %[[size1:.*]]: index) { +// CHECK: %[[alloc0:.*]] = memref.alloc(%[[size0]], %[[size1]]) : memref<?x?xf32> +// CHECK: %[[alloc1:.*]] = memref.alloc(%[[size1]]) : memref<?xf32> +// CHECK: call @mult_alloc(%[[size0]], %[[size1]], %[[alloc0]], %[[alloc1]]) : (index, index, memref<?x?xf32>, memref<?xf32>) -> () +// CHECK: "test.sink"(%[[alloc0]], %[[alloc1]]) : (memref<?x?xf32>, memref<?xf32>) -> () +// CHECK: } + + +// ----- + +func.func private @complex_alloc(%size0 : index, %size1 : index) -> (memref<?x?xf32>, memref<4xf32>, memref<?xf32>) { + %alloc0 = memref.alloc(%size0, %size1) : memref<?x?xf32> + %alloc1 = memref.alloc() : memref<4xf32> + %alloc2 = memref.alloc(%size1) : memref<?xf32> + return %alloc0, %alloc1, %alloc2 : memref<?x?xf32>, memref<4xf32>, memref<?xf32> +} + +func.func @complex_alloc_test(%size0 : index, %size1: index) { + %alloc0, %alloc1, %alloc2 = call @complex_alloc(%size0, %size1) : (index, index) -> (memref<?x?xf32>, memref<4xf32>, memref<?xf32>) + "test.sink"(%alloc0, %alloc1, %alloc2) : (memref<?x?xf32>, memref<4xf32>, memref<?xf32>) -> () +} + +// CHECK-LABEL: func private @complex_alloc( +// CHECK-SAME: %{{.*}}: index, %{{.*}}: index, +// CHECK-SAME: %{{.*}}: memref<?x?xf32>, +// CHECK-SAME: %{{.*}}: memref<4xf32>, +// CHECK-SAME: %{{.*}}: memref<?xf32>) { + +// CHECK-LABEL: func @complex_alloc_test( +// CHECK-SAME: %[[size0:.*]]: index, +// CHECK-SAME: %[[size1:.*]]: index) { +// CHECK: %[[alloc0:.*]] = memref.alloc(%[[size0]], %[[size1]]) : memref<?x?xf32> +// CHECK: %[[alloc1:.*]] = memref.alloc() : memref<4xf32> +// CHECK: %[[alloc2:.*]] = memref.alloc(%[[size1]]) : memref<?xf32> +// CHECK: call @complex_alloc(%[[size0]], %[[size1]], %[[alloc0]], %[[alloc1]], %[[alloc2]]) : (index, index, memref<?x?xf32>, memref<4xf32>, memref<?xf32>) -> () +// CHECK: "test.sink"(%[[alloc0]], %[[alloc1]], %[[alloc2]]) : (memref<?x?xf32>, memref<4xf32>, memref<?xf32>) -> () +// CHECK: } diff --git a/mlir/test/Transforms/buffer-results-to-out-params-elim.mlir b/mlir/test/Transforms/buffer-results-to-out-params-hosit-static-allocs.mlir index 2783836..2783836 100644 --- 
a/mlir/test/Transforms/buffer-results-to-out-params-elim.mlir +++ b/mlir/test/Transforms/buffer-results-to-out-params-hosit-static-allocs.mlir diff --git a/orc-rt/include/orc-rt/WrapperFunction.h b/orc-rt/include/orc-rt/WrapperFunction.h index 47e770f..c43f1c5 100644 --- a/orc-rt/include/orc-rt/WrapperFunction.h +++ b/orc-rt/include/orc-rt/WrapperFunction.h @@ -16,7 +16,9 @@ #include "orc-rt-c/WrapperFunction.h" #include "orc-rt/CallableTraitsHelper.h" #include "orc-rt/Error.h" +#include "orc-rt/ExecutorAddress.h" #include "orc-rt/bind.h" +#include "orc-rt/move_only_function.h" #include <utility> @@ -205,6 +207,128 @@ struct ResultDeserializer<std::tuple<Error>, Serializer> { /// wrapper functions in C++. struct WrapperFunction { + /// Wraps an asynchronous method (a method returning void, and taking a + /// return callback as its first argument) for use with + /// WrapperFunction::handle. + /// + /// AsyncMethod's call operator takes an ExecutorAddr as its second argument, + /// casts it to a ClassT*, and then calls the wrapped method on that pointer, + /// forwarding the return callback and any subsequent arguments (after the + /// second argument representing the object address). + /// + /// This utility removes some of the boilerplate from writing wrappers for + /// method calls. + template <typename ClassT, typename ReturnT, typename... ArgTs> + struct AsyncMethod { + AsyncMethod(void (ClassT::*M)(ReturnT, ArgTs...)) : M(M) {} + void operator()(ReturnT &&Return, ExecutorAddr Obj, ArgTs &&...Args) { + (Obj.toPtr<ClassT *>()->*M)(std::forward<ReturnT>(Return), + std::forward<ArgTs>(Args)...); + } + + private: + void (ClassT::*M)(ReturnT, ArgTs...); + }; + + /// Create an AsyncMethod wrapper for the given method pointer. The given + /// method should be asynchronous: returning void, and taking a return + /// callback as its first argument. + /// + /// The handleWithAsyncMethod function can be used to remove some of the + /// boilerplate from writing wrappers for method calls: + /// + /// @code{.cpp} + /// class MyClass { + /// public: + /// void myMethod(move_only_function<void(std::string)> Return, + /// uint32_t X, bool Y) { ... } + /// }; + /// + /// // SPS Method signature -- note MyClass object address as first + /// // argument. + /// using SPSMyMethodWrapperSignature = + /// SPSString(SPSExecutorAddr, uint32_t, bool); + /// + /// + /// static void my_method_async_sps_wrapper( + /// orc_rt_SessionRef Session, void *CallCtx, + /// orc_rt_WrapperFunctionReturn Return, + /// orc_rt_WrapperFunctionBuffer ArgBytes) { + /// using SPSSig = SPSString(SPSExecutorAddr, uint32_t, bool); + /// SPSWrapperFunction<SPSSig>::handle( + /// Session, CallCtx, Return, ArgBytes, + /// WrapperFunction::handleWithAsyncMethod(&MyClass::myMethod)); + /// } + /// @endcode + /// + template <typename ClassT, typename ReturnT, typename... ArgTs> + static AsyncMethod<ClassT, ReturnT, ArgTs...> + handleWithAsyncMethod(void (ClassT::*M)(ReturnT, ArgTs...)) { + return AsyncMethod<ClassT, ReturnT, ArgTs...>(M); + } + + /// Wraps a synchronous method (an ordinary method that returns its result, + /// as opposed to an asynchronous method, see AsyncMethod) for use with + /// WrapperFunction::handle. + /// + /// SyncMethod's call operator takes a return callback as its first argument + /// and an ExecutorAddr as its second argument. The ExecutorAddr argument is + /// cast to a ClassT*, and the wrapped method is then called on that pointer, + /// passing the subsequent arguments (those after the object address). 
The Return + /// callback is then called on the value returned from the method. + /// + /// This utility removes some of the boilerplate from writing wrappers for + /// method calls. + template <typename ClassT, typename RetT, typename... ArgTs> + class SyncMethod { + public: + SyncMethod(RetT (ClassT::*M)(ArgTs...)) : M(M) {} + + void operator()(move_only_function<void(RetT)> Return, ExecutorAddr Obj, + ArgTs &&...Args) { + Return((Obj.toPtr<ClassT *>()->*M)(std::forward<ArgTs>(Args)...)); + } + + private: + RetT (ClassT::*M)(ArgTs...); + }; + + /// Create a SyncMethod wrapper for the given method pointer. The given + /// method should be synchronous, i.e. returning its result (as opposed to + /// asynchronous, see AsyncMethod). + /// + /// The handleWithSyncMethod function can be used to remove some of the + /// boilerplate from writing wrappers for method calls: + /// + /// @code{.cpp} + /// class MyClass { + /// public: + /// std::string myMethod(uint32_t X, bool Y) { ... } + /// }; + /// + /// // SPS Method signature -- note MyClass object address as first + /// // argument. + /// using SPSMyMethodWrapperSignature = + /// SPSString(SPSExecutorAddr, uint32_t, bool); + /// + /// + /// static void my_method_sync_sps_wrapper( + /// orc_rt_SessionRef Session, void *CallCtx, + /// orc_rt_WrapperFunctionReturn Return, + /// orc_rt_WrapperFunctionBuffer ArgBytes) { + /// using SPSSig = SPSString(SPSExecutorAddr, uint32_t, bool); + /// SPSWrapperFunction<SPSSig>::handle( + /// Session, CallCtx, Return, ArgBytes, + /// WrapperFunction::handleWithSyncMethod(&MyClass::myMethod)); + /// } + /// @endcode + /// + template <typename ClassT, typename RetT, typename... ArgTs> + static SyncMethod<ClassT, RetT, ArgTs...> + handleWithSyncMethod(RetT (ClassT::*M)(ArgTs...)) { + return SyncMethod<ClassT, RetT, ArgTs...>(M); + } + /// Make a call to a wrapper function. 
/// /// This utility serializes and deserializes arguments and return values diff --git a/orc-rt/unittests/SPSWrapperFunctionTest.cpp b/orc-rt/unittests/SPSWrapperFunctionTest.cpp index 32aaa61..e010e2a 100644 --- a/orc-rt/unittests/SPSWrapperFunctionTest.cpp +++ b/orc-rt/unittests/SPSWrapperFunctionTest.cpp @@ -297,3 +297,53 @@ TEST(SPSWrapperFunctionUtilsTest, TestHandlerWithReferences) { EXPECT_EQ(OpCounter<3>::moves(), 1U); EXPECT_EQ(OpCounter<3>::copies(), 0U); } + +namespace { +class Adder { +public: + int32_t addSync(int32_t X, int32_t Y) { return X + Y; } + void addAsync(move_only_function<void(int32_t)> Return, int32_t X, + int32_t Y) { + Return(addSync(X, Y)); + } +}; +} // anonymous namespace + +static void adder_add_async_sps_wrapper(orc_rt_SessionRef Session, + void *CallCtx, + orc_rt_WrapperFunctionReturn Return, + orc_rt_WrapperFunctionBuffer ArgBytes) { + SPSWrapperFunction<int32_t(SPSExecutorAddr, int32_t, int32_t)>::handle( + Session, CallCtx, Return, ArgBytes, + WrapperFunction::handleWithAsyncMethod(&Adder::addAsync)); +} + +TEST(SPSWrapperFunctionUtilsTest, HandleWithAsyncMethod) { + auto A = std::make_unique<Adder>(); + int32_t Result = 0; + SPSWrapperFunction<int32_t(SPSExecutorAddr, int32_t, int32_t)>::call( + DirectCaller(nullptr, adder_add_async_sps_wrapper), + [&](Expected<int32_t> R) { Result = cantFail(std::move(R)); }, + ExecutorAddr::fromPtr(A.get()), 41, 1); + + EXPECT_EQ(Result, 42); +} + +static void adder_add_sync_sps_wrapper(orc_rt_SessionRef Session, void *CallCtx, + orc_rt_WrapperFunctionReturn Return, + orc_rt_WrapperFunctionBuffer ArgBytes) { + SPSWrapperFunction<int32_t(SPSExecutorAddr, int32_t, int32_t)>::handle( + Session, CallCtx, Return, ArgBytes, + WrapperFunction::handleWithSyncMethod(&Adder::addSync)); +} + +TEST(SPSWrapperFunctionUtilsTest, HandleWithSyncMethod) { + auto A = std::make_unique<Adder>(); + int32_t Result = 0; + SPSWrapperFunction<int32_t(SPSExecutorAddr, int32_t, int32_t)>::call( + DirectCaller(nullptr, adder_add_sync_sps_wrapper), + [&](Expected<int32_t> R) { Result = cantFail(std::move(R)); }, + ExecutorAddr::fromPtr(A.get()), 41, 1); + + EXPECT_EQ(Result, 42); +}
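For context on what the new helpers replace: SyncMethod simply casts the object address back to the target class and forwards the call, so a handler written without it spells that cast out by hand. The sketch below is illustrative only; the wrapper name adder_add_manual_sps_wrapper is hypothetical, and it assumes (as the existing tests in SPSWrapperFunctionTest.cpp suggest) that SPSWrapperFunction<...>::handle accepts any callable whose signature matches the one SyncMethod exposes.

static void adder_add_manual_sps_wrapper(orc_rt_SessionRef Session,
                                         void *CallCtx,
                                         orc_rt_WrapperFunctionReturn Return,
                                         orc_rt_WrapperFunctionBuffer ArgBytes) {
  // Hand-written equivalent of
  // WrapperFunction::handleWithSyncMethod(&Adder::addSync): cast the object
  // address back to an Adder* and pass the method's result to the return
  // continuation. handleWithSyncMethod generates exactly this forwarding code.
  SPSWrapperFunction<int32_t(SPSExecutorAddr, int32_t, int32_t)>::handle(
      Session, CallCtx, Return, ArgBytes,
      [](move_only_function<void(int32_t)> SendResult, ExecutorAddr Obj,
         int32_t X, int32_t Y) {
        SendResult(Obj.toPtr<Adder *>()->addSync(X, Y));
      });
}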