413 files changed, 14812 insertions, 3437 deletions
diff --git a/.github/workflows/libcxx-run-benchmarks.yml b/.github/workflows/libcxx-run-benchmarks.yml index 0379a0a..9e8f558 100644 --- a/.github/workflows/libcxx-run-benchmarks.yml +++ b/.github/workflows/libcxx-run-benchmarks.yml @@ -64,17 +64,21 @@ jobs: path: repo # Avoid nuking the workspace, where we have the Python virtualenv - name: Run baseline + env: + BENCHMARKS: ${{ steps.vars.outputs.benchmarks }} run: | source .venv/bin/activate && cd repo python -m pip install -r libcxx/utils/requirements.txt baseline_commit=$(git merge-base ${{ steps.vars.outputs.pr_base }} ${{ steps.vars.outputs.pr_head }}) - ./libcxx/utils/test-at-commit --commit ${baseline_commit} -B build/baseline -- -sv -j1 --param optimization=speed ${{ steps.vars.outputs.benchmarks }} + ./libcxx/utils/test-at-commit --commit ${baseline_commit} -B build/baseline -- -sv -j1 --param optimization=speed "$BENCHMARKS" ./libcxx/utils/consolidate-benchmarks build/baseline | tee baseline.lnt - name: Run candidate + env: + BENCHMARKS: ${{ steps.vars.outputs.benchmarks }} run: | source .venv/bin/activate && cd repo - ./libcxx/utils/test-at-commit --commit ${{ steps.vars.outputs.pr_head }} -B build/candidate -- -sv -j1 --param optimization=speed ${{ steps.vars.outputs.benchmarks }} + ./libcxx/utils/test-at-commit --commit ${{ steps.vars.outputs.pr_head }} -B build/candidate -- -sv -j1 --param optimization=speed "$BENCHMARKS" ./libcxx/utils/consolidate-benchmarks build/candidate | tee candidate.lnt - name: Compare baseline and candidate runs diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index 83969b5..3f2eb3a 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -58,7 +58,6 @@ jobs: ref: ${{ steps.vars.outputs.ref }} upload: ${{ steps.vars.outputs.upload }} target-cmake-flags: ${{ steps.vars.outputs.target-cmake-flags }} - ccache: ${{ steps.vars.outputs.ccache }} build-flang: ${{ steps.vars.outputs.build-flang }} release-binary-basename: ${{ steps.vars.outputs.release-binary-basename }} release-binary-filename: ${{ steps.vars.outputs.release-binary-filename }} @@ -123,13 +122,6 @@ jobs: echo "release-binary-filename=$release_binary_basename.tar.xz" >> $GITHUB_OUTPUT target="$RUNNER_OS-$RUNNER_ARCH" - # The hendrikmuhs/ccache-action action does not support installing sccache - # on arm64 Linux. - if [ "$target" = "Linux-ARM64" ]; then - echo ccache=ccache >> $GITHUB_OUTPUT - else - echo ccache=sccache >> $GITHUB_OUTPUT - fi # The macOS builds try to cross compile some libraries so we need to # add extra CMake args to disable them. @@ -222,15 +214,12 @@ jobs: - name: Configure id: build shell: bash - env: - CCACHE_BIN: ${{ needs.prepare.outputs.ccache }} run: | # There were some issues on the ARM64 MacOS runners with trying to build x86 object, # so we need to set some extra cmake flags to disable this. 
cmake -G Ninja -S llvm -B ${{ steps.setup-stage.outputs.build-prefix }}/build \ ${{ needs.prepare.outputs.target-cmake-flags }} \ -C clang/cmake/caches/Release.cmake \ - -DBOOTSTRAP_LLVM_PARALLEL_LINK_JOBS=1 \ -DBOOTSTRAP_BOOTSTRAP_CPACK_PACKAGE_FILE_NAME="${{ needs.prepare.outputs.release-binary-basename }}" - name: Build diff --git a/.github/workflows/release-documentation.yml b/.github/workflows/release-documentation.yml index d3d375d..4cf973d 100644 --- a/.github/workflows/release-documentation.yml +++ b/.github/workflows/release-documentation.yml @@ -25,6 +25,10 @@ on: description: 'Upload documentation' required: false type: boolean + secrets: + WWW_RELEASES_TOKEN: + description: "Secret used to create a PR with the documentation changes." + required: false jobs: release-documentation: diff --git a/.github/workflows/release-tasks.yml b/.github/workflows/release-tasks.yml index a184996..d4c2a55 100644 --- a/.github/workflows/release-tasks.yml +++ b/.github/workflows/release-tasks.yml @@ -54,6 +54,9 @@ jobs: with: release-version: ${{ needs.validate-tag.outputs.release-version }} upload: true + # Called workflows don't have access to secrets by default, so we need to explicitly pass secrets that we use. + secrets: + WWW_RELEASES_TOKEN: ${{ secrets.WWW_RELEASES_TOKEN }} release-doxygen: name: Build and Upload Release Doxygen diff --git a/clang-tools-extra/clang-doc/assets/clang-doc-mustache.css b/clang-tools-extra/clang-doc/assets/clang-doc-mustache.css index e555ee7..67f11f7 100644 --- a/clang-tools-extra/clang-doc/assets/clang-doc-mustache.css +++ b/clang-tools-extra/clang-doc/assets/clang-doc-mustache.css @@ -396,7 +396,7 @@ body, html { .sidebar { width: 250px; - top: 0; + top: 60px; left: 0; height: 100%; position: fixed; diff --git a/clang/bindings/python/clang/cindex.py b/clang/bindings/python/clang/cindex.py index da97bd3..2786add 100644 --- a/clang/bindings/python/clang/cindex.py +++ b/clang/bindings/python/clang/cindex.py @@ -3015,7 +3015,7 @@ class _CXUnsavedFile(Structure): # Functions calls through the python interface are rather slow. Fortunately, -# for most symboles, we do not need to perform a function call. Their spelling +# for most symbols, we do not need to perform a function call. Their spelling never changes and is consequently provided by this spelling cache. SPELLING_CACHE = { # 0: CompletionChunk.Kind("Optional"), diff --git a/clang/cmake/caches/Release.cmake b/clang/cmake/caches/Release.cmake index a523cc5..82bfdc0 100644 --- a/clang/cmake/caches/Release.cmake +++ b/clang/cmake/caches/Release.cmake @@ -44,6 +44,19 @@ set(LLVM_RELEASE_ENABLE_LTO THIN CACHE STRING "") set(LLVM_RELEASE_ENABLE_PGO ON CACHE BOOL "") set(LLVM_RELEASE_ENABLE_RUNTIMES ${DEFAULT_RUNTIMES} CACHE STRING "") set(LLVM_RELEASE_ENABLE_PROJECTS ${DEFAULT_PROJECTS} CACHE STRING "") + +# This option enables linking stage2 clang statically with the runtimes +# (libc++ and compiler-rt) from stage1. In theory this will give the +# binaries better performance and make them more portable. However, +# this configuration is not well tested and causes build failures with +# the flang-rt test cases, since the -stdlib=libc++ flag does not +# get propagated to the runtimes build. There is also a separate +# issue on Darwin where clang will use the local libc++ headers, but +# link with system libc++ which can cause some incompatibilities. +# See https://github.com/llvm/llvm-project/issues/77653 +# Because of these problems, this option will default to OFF.
+set(LLVM_RELEASE_ENABLE_LINK_LOCAL_RUNTIMES OFF CACHE BOOL "") + # Note we don't need to add install here, since it is one of the pre-defined # steps. set(LLVM_RELEASE_FINAL_STAGE_TARGETS "clang;package;check-all;check-llvm;check-clang" CACHE STRING "") @@ -55,8 +68,12 @@ set(CLANG_ENABLE_BOOTSTRAP ON CACHE BOOL "") set(STAGE1_PROJECTS "clang") +# Need to build compiler-rt in order to use PGO for later stages. +set(STAGE1_RUNTIMES "compiler-rt") # Build all runtimes so we can statically link them into the stage2 compiler. -set(STAGE1_RUNTIMES "compiler-rt;libcxx;libcxxabi;libunwind") +if(LLVM_RELEASE_ENABLE_LINK_LOCAL_RUNTIMES) + list(APPEND STAGE1_RUNTIMES "libcxx;libcxxabi;libunwind") +endif() if (LLVM_RELEASE_ENABLE_PGO) list(APPEND STAGE1_PROJECTS "lld") @@ -118,11 +135,13 @@ set_instrument_and_final_stage_var(LLVM_ENABLE_LTO "${LLVM_RELEASE_ENABLE_LTO}" if (LLVM_RELEASE_ENABLE_LTO) set_instrument_and_final_stage_var(LLVM_ENABLE_LLD "ON" BOOL) endif() -set_instrument_and_final_stage_var(LLVM_ENABLE_LIBCXX "ON" BOOL) -set_instrument_and_final_stage_var(LLVM_STATIC_LINK_CXX_STDLIB "ON" BOOL) -set(RELEASE_LINKER_FLAGS "-rtlib=compiler-rt --unwindlib=libunwind") -if(NOT ${CMAKE_HOST_SYSTEM_NAME} MATCHES "Darwin") - set(RELEASE_LINKER_FLAGS "${RELEASE_LINKER_FLAGS} -static-libgcc") +if(LLVM_RELEASE_ENABLE_LINK_LOCAL_RUNTIMES) + set_instrument_and_final_stage_var(LLVM_ENABLE_LIBCXX "ON" BOOL) + set_instrument_and_final_stage_var(LLVM_STATIC_LINK_CXX_STDLIB "ON" BOOL) + set(RELEASE_LINKER_FLAGS "-rtlib=compiler-rt --unwindlib=libunwind") + if(NOT ${CMAKE_HOST_SYSTEM_NAME} MATCHES "Darwin") + set(RELEASE_LINKER_FLAGS "${RELEASE_LINKER_FLAGS} -static-libgcc") + endif() endif() # Set flags for bolt @@ -130,9 +149,11 @@ if (${CMAKE_HOST_SYSTEM_NAME} MATCHES "Linux") set(RELEASE_LINKER_FLAGS "${RELEASE_LINKER_FLAGS} -Wl,--emit-relocs,-znow") endif() -set_instrument_and_final_stage_var(CMAKE_EXE_LINKER_FLAGS ${RELEASE_LINKER_FLAGS} STRING) -set_instrument_and_final_stage_var(CMAKE_SHARED_LINKER_FLAGS ${RELEASE_LINKER_FLAGS} STRING) -set_instrument_and_final_stage_var(CMAKE_MODULE_LINKER_FLAGS ${RELEASE_LINKER_FLAGS} STRING) +if (RELEASE_LINKER_FLAGS) + set_instrument_and_final_stage_var(CMAKE_EXE_LINKER_FLAGS ${RELEASE_LINKER_FLAGS} STRING) + set_instrument_and_final_stage_var(CMAKE_SHARED_LINKER_FLAGS ${RELEASE_LINKER_FLAGS} STRING) + set_instrument_and_final_stage_var(CMAKE_MODULE_LINKER_FLAGS ${RELEASE_LINKER_FLAGS} STRING) +endif() # Final Stage Config (stage2) set_final_stage_var(LLVM_ENABLE_RUNTIMES "${LLVM_RELEASE_ENABLE_RUNTIMES}" STRING) diff --git a/clang/docs/AllocToken.rst b/clang/docs/AllocToken.rst index bda8466..b65e18c 100644 --- a/clang/docs/AllocToken.rst +++ b/clang/docs/AllocToken.rst @@ -37,8 +37,8 @@ The default mode to calculate tokens is: pointers. Other token ID assignment modes are supported, but they may be subject to -change or removal. These may (experimentally) be selected with ``-mllvm --alloc-token-mode=<mode>``: +change or removal. These may (experimentally) be selected with ``-Xclang +-falloc-token-mode=<mode>``: * ``typehash``: This mode assigns a token ID based on the hash of the allocated type's name. 
diff --git a/clang/include/clang/AST/StmtOpenACC.h b/clang/include/clang/AST/StmtOpenACC.h index 8b4554e..4d52805 100644 --- a/clang/include/clang/AST/StmtOpenACC.h +++ b/clang/include/clang/AST/StmtOpenACC.h @@ -815,6 +815,17 @@ public: Stmt *getAssociatedStmt() { return OpenACCAssociatedStmtConstruct::getAssociatedStmt(); } + + // A struct to represent a broken-down version of the associated statement, + // providing the information specified in OpenACC3.3 Section 2.12. + struct StmtInfo { + const Expr *V; + const Expr *X; + // TODO: OpenACC: We should expand this as we're implementing the other + // atomic construct kinds. + }; + + const StmtInfo getAssociatedStmtInfo() const; }; } // namespace clang diff --git a/clang/include/clang/Basic/CodeGenOptions.h b/clang/include/clang/Basic/CodeGenOptions.h index cae06c3..5d5cf25 100644 --- a/clang/include/clang/Basic/CodeGenOptions.h +++ b/clang/include/clang/Basic/CodeGenOptions.h @@ -447,10 +447,6 @@ public: std::optional<double> AllowRuntimeCheckSkipHotCutoff; - /// Maximum number of allocation tokens (0 = no max), nullopt if none set (use - /// pass default). - std::optional<uint64_t> AllocTokenMax; - /// List of backend command-line options for -fembed-bitcode. std::vector<uint8_t> CmdArgs; diff --git a/clang/include/clang/Basic/LangOptions.h b/clang/include/clang/Basic/LangOptions.h index 260a753..8aa89d8 100644 --- a/clang/include/clang/Basic/LangOptions.h +++ b/clang/include/clang/Basic/LangOptions.h @@ -25,6 +25,7 @@ #include "llvm/ADT/FloatingPointMode.h" #include "llvm/ADT/StringRef.h" #include "llvm/BinaryFormat/DXContainer.h" +#include "llvm/Support/AllocToken.h" #include "llvm/TargetParser/Triple.h" #include <optional> #include <string> @@ -565,6 +566,13 @@ public: bool AtomicFineGrainedMemory = false; bool AtomicIgnoreDenormalMode = false; + /// Maximum number of allocation tokens (0 = no max), nullopt if none set (use + /// target default). + std::optional<uint64_t> AllocTokenMax; + + /// The allocation token mode. 
+ std::optional<llvm::AllocTokenMode> AllocTokenMode; + LangOptions(); /// Set language defaults for the given input language and diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 7ae153d..0c9584f 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -2751,6 +2751,10 @@ def falloc_token_max_EQ : Joined<["-"], "falloc-token-max=">, MetaVarName<"<N>">, HelpText<"Limit to maximum N allocation tokens (0 = no max)">; +def falloc_token_mode_EQ : Joined<["-"], "falloc-token-mode=">, + Group<f_Group>, Visibility<[CC1Option]>, + HelpText<"Set the allocation token mode (experimental)">; + def fallow_runtime_check_skip_hot_cutoff_EQ : Joined<["-"], "fallow-runtime-check-skip-hot-cutoff=">, Group<f_clang_Group>, diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp index 6b98927..f15b3c1 100644 --- a/clang/lib/AST/ByteCode/Compiler.cpp +++ b/clang/lib/AST/ByteCode/Compiler.cpp @@ -6432,6 +6432,13 @@ bool Compiler<Emitter>::visitFunc(const FunctionDecl *F) { return this->emitNoRet(SourceInfo{}); } +static uint32_t getBitWidth(const Expr *E) { + assert(E->refersToBitField()); + const auto *ME = cast<MemberExpr>(E); + const auto *FD = cast<FieldDecl>(ME->getMemberDecl()); + return FD->getBitWidthValue(); +} + template <class Emitter> bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { const Expr *SubExpr = E->getSubExpr(); @@ -6460,10 +6467,15 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { return DiscardResult ? this->emitPopPtr(E) : true; } - if (T == PT_Float) { + if (T == PT_Float) return DiscardResult ? this->emitIncfPop(getFPOptions(E), E) : this->emitIncf(getFPOptions(E), E); - } + + if (SubExpr->refersToBitField()) + return DiscardResult ? this->emitIncPopBitfield(*T, E->canOverflow(), + getBitWidth(SubExpr), E) + : this->emitIncBitfield(*T, E->canOverflow(), + getBitWidth(SubExpr), E); return DiscardResult ? this->emitIncPop(*T, E->canOverflow(), E) : this->emitInc(*T, E->canOverflow(), E); @@ -6484,9 +6496,15 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { return DiscardResult ? this->emitPopPtr(E) : true; } - if (T == PT_Float) { + if (T == PT_Float) return DiscardResult ? this->emitDecfPop(getFPOptions(E), E) : this->emitDecf(getFPOptions(E), E); + + if (SubExpr->refersToBitField()) { + return DiscardResult ? this->emitDecPopBitfield(*T, E->canOverflow(), + getBitWidth(SubExpr), E) + : this->emitDecBitfield(*T, E->canOverflow(), + getBitWidth(SubExpr), E); } return DiscardResult ? this->emitDecPop(*T, E->canOverflow(), E) @@ -6515,6 +6533,11 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { if (DiscardResult) { if (T == PT_Float) return this->emitIncfPop(getFPOptions(E), E); + if (SubExpr->refersToBitField()) + return DiscardResult ? 
this->emitIncPopBitfield(*T, E->canOverflow(), + getBitWidth(SubExpr), E) + : this->emitIncBitfield(*T, E->canOverflow(), + getBitWidth(SubExpr), E); return this->emitIncPop(*T, E->canOverflow(), E); } @@ -6530,6 +6553,11 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { return false; if (!this->emitStoreFloat(E)) return false; + } else if (SubExpr->refersToBitField()) { + assert(isIntegralType(*T)); + if (!this->emitPreIncBitfield(*T, E->canOverflow(), getBitWidth(SubExpr), + E)) + return false; } else { assert(isIntegralType(*T)); if (!this->emitPreInc(*T, E->canOverflow(), E)) @@ -6560,6 +6588,11 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { if (DiscardResult) { if (T == PT_Float) return this->emitDecfPop(getFPOptions(E), E); + if (SubExpr->refersToBitField()) + return DiscardResult ? this->emitDecPopBitfield(*T, E->canOverflow(), + getBitWidth(SubExpr), E) + : this->emitDecBitfield(*T, E->canOverflow(), + getBitWidth(SubExpr), E); return this->emitDecPop(*T, E->canOverflow(), E); } @@ -6575,6 +6608,11 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { return false; if (!this->emitStoreFloat(E)) return false; + } else if (SubExpr->refersToBitField()) { + assert(isIntegralType(*T)); + if (!this->emitPreDecBitfield(*T, E->canOverflow(), getBitWidth(SubExpr), + E)) + return false; } else { assert(isIntegralType(*T)); if (!this->emitPreDec(*T, E->canOverflow(), E)) diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h index d8529da..89f6fbe 100644 --- a/clang/lib/AST/ByteCode/Interp.h +++ b/clang/lib/AST/ByteCode/Interp.h @@ -702,7 +702,7 @@ enum class IncDecOp { template <typename T, IncDecOp Op, PushVal DoPush> bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr, - bool CanOverflow) { + bool CanOverflow, UnsignedOrNone BitWidth = std::nullopt) { assert(!Ptr.isDummy()); if (!S.inConstantContext()) { @@ -725,12 +725,18 @@ bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr, if constexpr (Op == IncDecOp::Inc) { if (!T::increment(Value, &Result) || !CanOverflow) { - Ptr.deref<T>() = Result; + if (BitWidth) + Ptr.deref<T>() = Result.truncate(*BitWidth); + else + Ptr.deref<T>() = Result; return true; } } else { if (!T::decrement(Value, &Result) || !CanOverflow) { - Ptr.deref<T>() = Result; + if (BitWidth) + Ptr.deref<T>() = Result.truncate(*BitWidth); + else + Ptr.deref<T>() = Result; return true; } } @@ -774,6 +780,17 @@ bool Inc(InterpState &S, CodePtr OpPC, bool CanOverflow) { CanOverflow); } +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool IncBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow, + unsigned BitWidth) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Increment)) + return false; + + return IncDecHelper<T, IncDecOp::Inc, PushVal::Yes>(S, OpPC, Ptr, CanOverflow, + BitWidth); +} + /// 1) Pops a pointer from the stack /// 2) Load the value from the pointer /// 3) Writes the value increased by one back to the pointer @@ -787,6 +804,17 @@ bool IncPop(InterpState &S, CodePtr OpPC, bool CanOverflow) { } template <PrimType Name, class T = typename PrimConv<Name>::T> +bool IncPopBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow, + uint32_t BitWidth) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Increment)) + return false; + + return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, CanOverflow, + BitWidth); +} + +template <PrimType Name, class T = typename 
PrimConv<Name>::T> bool PreInc(InterpState &S, CodePtr OpPC, bool CanOverflow) { const Pointer &Ptr = S.Stk.peek<Pointer>(); if (!CheckLoad(S, OpPC, Ptr, AK_Increment)) @@ -795,6 +823,17 @@ bool PreInc(InterpState &S, CodePtr OpPC, bool CanOverflow) { return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, CanOverflow); } +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool PreIncBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow, + uint32_t BitWidth) { + const Pointer &Ptr = S.Stk.peek<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Increment)) + return false; + + return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, CanOverflow, + BitWidth); +} + /// 1) Pops a pointer from the stack /// 2) Load the value from the pointer /// 3) Writes the value decreased by one back to the pointer @@ -808,6 +847,16 @@ bool Dec(InterpState &S, CodePtr OpPC, bool CanOverflow) { return IncDecHelper<T, IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr, CanOverflow); } +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool DecBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow, + uint32_t BitWidth) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Decrement)) + return false; + + return IncDecHelper<T, IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr, CanOverflow, + BitWidth); +} /// 1) Pops a pointer from the stack /// 2) Load the value from the pointer @@ -822,6 +871,17 @@ bool DecPop(InterpState &S, CodePtr OpPC, bool CanOverflow) { } template <PrimType Name, class T = typename PrimConv<Name>::T> +bool DecPopBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow, + uint32_t BitWidth) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Decrement)) + return false; + + return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, CanOverflow, + BitWidth); +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> bool PreDec(InterpState &S, CodePtr OpPC, bool CanOverflow) { const Pointer &Ptr = S.Stk.peek<Pointer>(); if (!CheckLoad(S, OpPC, Ptr, AK_Decrement)) @@ -829,6 +889,16 @@ bool PreDec(InterpState &S, CodePtr OpPC, bool CanOverflow) { return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, CanOverflow); } +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool PreDecBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow, + uint32_t BitWidth) { + const Pointer &Ptr = S.Stk.peek<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Decrement)) + return false; + return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, CanOverflow, + BitWidth); +} + template <IncDecOp Op, PushVal DoPush> bool IncDecFloatHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr, uint32_t FPOI) { diff --git a/clang/lib/AST/ByteCode/Opcodes.td b/clang/lib/AST/ByteCode/Opcodes.td index 532c444..406feb5 100644 --- a/clang/lib/AST/ByteCode/Opcodes.td +++ b/clang/lib/AST/ByteCode/Opcodes.td @@ -612,12 +612,25 @@ class OverflowOpcode : Opcode { let HasGroup = 1; } +class OverflowBitfieldOpcode : Opcode { + let Types = [AluTypeClass]; + let Args = [ArgBool, ArgUint32]; + let HasGroup = 1; +} + def Inc : OverflowOpcode; +def IncBitfield : OverflowBitfieldOpcode; def IncPop : OverflowOpcode; +def IncPopBitfield : OverflowBitfieldOpcode; def PreInc : OverflowOpcode; +def PreIncBitfield : OverflowBitfieldOpcode; + def Dec : OverflowOpcode; +def DecBitfield : OverflowBitfieldOpcode; def DecPop : OverflowOpcode; +def DecPopBitfield : OverflowBitfieldOpcode; def PreDec : OverflowOpcode; +def 
PreDecBitfield : OverflowBitfieldOpcode; // Float increment and decrement. def Incf: FloatOpcode; diff --git a/clang/lib/AST/StmtOpenACC.cpp b/clang/lib/AST/StmtOpenACC.cpp index 07e3de8..2b56c1e 100644 --- a/clang/lib/AST/StmtOpenACC.cpp +++ b/clang/lib/AST/StmtOpenACC.cpp @@ -12,7 +12,9 @@ #include "clang/AST/StmtOpenACC.h" #include "clang/AST/ASTContext.h" +#include "clang/AST/ExprCXX.h" #include "clang/AST/StmtCXX.h" + using namespace clang; OpenACCComputeConstruct * @@ -322,6 +324,38 @@ OpenACCAtomicConstruct *OpenACCAtomicConstruct::Create( return Inst; } +const OpenACCAtomicConstruct::StmtInfo +OpenACCAtomicConstruct::getAssociatedStmtInfo() const { + // This ends up being a vastly simplified version of SemaOpenACCAtomic, since + // it doesn't have to worry about erroring out, but we should do a lot of + // asserts to ensure we don't get off into the weeds. + assert(getAssociatedStmt() && "invalid associated stmt?"); + + switch (AtomicKind) { + case OpenACCAtomicKind::None: + case OpenACCAtomicKind::Write: + case OpenACCAtomicKind::Update: + case OpenACCAtomicKind::Capture: + assert(false && "Only 'read' has been implemented here"); + return {}; + case OpenACCAtomicKind::Read: { + // Read only supports the format 'v = x'; where both sides are a scalar + // expression. This can come in 2 forms; BinaryOperator or + // CXXOperatorCallExpr (rarely). + const Expr *AssignExpr = cast<const Expr>(getAssociatedStmt()); + if (const auto *BO = dyn_cast<BinaryOperator>(AssignExpr)) { + assert(BO->getOpcode() == BO_Assign); + return {BO->getLHS()->IgnoreImpCasts(), BO->getRHS()->IgnoreImpCasts()}; + } + + const auto *OO = cast<CXXOperatorCallExpr>(AssignExpr); + assert(OO->getOperator() == OO_Equal); + + return {OO->getArg(0)->IgnoreImpCasts(), OO->getArg(1)->IgnoreImpCasts()}; + } + } +} + OpenACCCacheConstruct *OpenACCCacheConstruct::CreateEmpty(const ASTContext &C, unsigned NumVars) { void *Mem = diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp index 19ed656..800262a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp @@ -1011,9 +1011,9 @@ public: } mlir::Attribute VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die, QualType t) { - cgm.errorNYI(die->getBeginLoc(), - "ConstExprEmitter::VisitCXXDefaultInitExpr"); - return {}; + // No need for a DefaultInitExprScope: we don't handle 'this' in a + // constant expression. 
+ return Visit(die->getExpr(), t); } mlir::Attribute VisitExprWithCleanups(ExprWithCleanups *e, QualType t) { @@ -1028,9 +1028,7 @@ public: mlir::Attribute VisitImplicitValueInitExpr(ImplicitValueInitExpr *e, QualType t) { - cgm.errorNYI(e->getBeginLoc(), - "ConstExprEmitter::VisitImplicitValueInitExpr"); - return {}; + return cgm.getBuilder().getZeroInitAttr(cgm.convertType(t)); } mlir::Attribute VisitInitListExpr(InitListExpr *ile, QualType t) { diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp index ce4ae7e..385f89c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp @@ -553,12 +553,15 @@ public: } void VisitIfClause(const OpenACCIfClause &clause) { - if constexpr (isOneOfTypes<OpTy, mlir::acc::ParallelOp, mlir::acc::SerialOp, - mlir::acc::KernelsOp, mlir::acc::InitOp, - mlir::acc::ShutdownOp, mlir::acc::SetOp, - mlir::acc::DataOp, mlir::acc::WaitOp, - mlir::acc::HostDataOp, mlir::acc::EnterDataOp, - mlir::acc::ExitDataOp, mlir::acc::UpdateOp>) { + if constexpr (isOneOfTypes< + OpTy, mlir::acc::ParallelOp, mlir::acc::SerialOp, + mlir::acc::KernelsOp, mlir::acc::InitOp, + mlir::acc::ShutdownOp, mlir::acc::SetOp, + mlir::acc::DataOp, mlir::acc::WaitOp, + mlir::acc::HostDataOp, mlir::acc::EnterDataOp, + mlir::acc::ExitDataOp, mlir::acc::UpdateOp, + mlir::acc::AtomicReadOp, mlir::acc::AtomicWriteOp, + mlir::acc::AtomicUpdateOp, mlir::acc::AtomicCaptureOp>) { operation.getIfCondMutable().append( createCondition(clause.getConditionExpr())); } else if constexpr (isCombinedType<OpTy>) { @@ -1144,6 +1147,10 @@ EXPL_SPEC(mlir::acc::HostDataOp) EXPL_SPEC(mlir::acc::EnterDataOp) EXPL_SPEC(mlir::acc::ExitDataOp) EXPL_SPEC(mlir::acc::UpdateOp) +EXPL_SPEC(mlir::acc::AtomicReadOp) +EXPL_SPEC(mlir::acc::AtomicWriteOp) +EXPL_SPEC(mlir::acc::AtomicCaptureOp) +EXPL_SPEC(mlir::acc::AtomicUpdateOp) #undef EXPL_SPEC template <typename ComputeOp, typename LoopOp> diff --git a/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp b/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp index e89393c..02bb46d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmtOpenACC.cpp @@ -306,6 +306,29 @@ CIRGenFunction::emitOpenACCCacheConstruct(const OpenACCCacheConstruct &s) { mlir::LogicalResult CIRGenFunction::emitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &s) { - cgm.errorNYI(s.getSourceRange(), "OpenACC Atomic Construct"); - return mlir::failure(); + // For now, we are only support 'read', so diagnose. We can switch on the kind + // later once we start implementing the other 3 forms. + if (s.getAtomicKind() != OpenACCAtomicKind::Read) { + cgm.errorNYI(s.getSourceRange(), "OpenACC Atomic Construct"); + return mlir::failure(); + } + + // While Atomic is an 'associated statement' construct, it 'steals' the + // expression it is associated with rather than emitting it inside of it. So + // it has custom emit logic. + mlir::Location start = getLoc(s.getSourceRange().getBegin()); + OpenACCAtomicConstruct::StmtInfo inf = s.getAssociatedStmtInfo(); + // Atomic 'read' only permits 'v = x', where v and x are both scalar L values. + // The getAssociatedStmtInfo strips off implicit casts, which includes + // implicit conversions and L-to-R-Value conversions, so we can just emit it + // as an L value. The Flang implementation has no problem with different + // types, so it appears that the dialect can handle the conversions. 
+ mlir::Value v = emitLValue(inf.V).getPointer(); + mlir::Value x = emitLValue(inf.X).getPointer(); + mlir::Type resTy = convertType(inf.V->getType()); + auto op = mlir::acc::AtomicReadOp::create(builder, start, x, v, resTy, + /*ifCond=*/{}); + emitOpenACCClauses(op, s.getDirectiveKind(), s.getDirectiveLoc(), + s.clauses()); + return mlir::success(); } diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp index 6020684..c423c4b 100644 --- a/clang/lib/CodeGen/BackendUtil.cpp +++ b/clang/lib/CodeGen/BackendUtil.cpp @@ -234,9 +234,12 @@ public: }; } // namespace -static AllocTokenOptions getAllocTokenOptions(const CodeGenOptions &CGOpts) { +static AllocTokenOptions getAllocTokenOptions(const LangOptions &LangOpts, + const CodeGenOptions &CGOpts) { AllocTokenOptions Opts; - Opts.MaxTokens = CGOpts.AllocTokenMax; + if (LangOpts.AllocTokenMode) + Opts.Mode = *LangOpts.AllocTokenMode; + Opts.MaxTokens = LangOpts.AllocTokenMax; Opts.Extended = CGOpts.SanitizeAllocTokenExtended; Opts.FastABI = CGOpts.SanitizeAllocTokenFastABI; return Opts; @@ -430,12 +433,6 @@ static bool initTargetOptions(const CompilerInstance &CI, Options.NoInfsFPMath = LangOpts.NoHonorInfs; Options.NoNaNsFPMath = LangOpts.NoHonorNaNs; Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS; - Options.UnsafeFPMath = LangOpts.AllowFPReassoc && LangOpts.AllowRecip && - LangOpts.NoSignedZero && LangOpts.ApproxFunc && - (LangOpts.getDefaultFPContractMode() == - LangOptions::FPModeKind::FPM_Fast || - LangOpts.getDefaultFPContractMode() == - LangOptions::FPModeKind::FPM_FastHonorPragmas); Options.BBAddrMap = CodeGenOpts.BBAddrMap; Options.BBSections = @@ -808,7 +805,7 @@ static void addSanitizers(const Triple &TargetTriple, // memory allocation function detection. MPM.addPass(InferFunctionAttrsPass()); } - MPM.addPass(AllocTokenPass(getAllocTokenOptions(CodeGenOpts))); + MPM.addPass(AllocTokenPass(getAllocTokenOptions(LangOpts, CodeGenOpts))); } }; if (ClSanitizeOnOptimizerEarlyEP) { diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp index e490b1c..3746bc04 100644 --- a/clang/lib/CodeGen/CodeGenModule.cpp +++ b/clang/lib/CodeGen/CodeGenModule.cpp @@ -1325,22 +1325,29 @@ void CodeGenModule::Release() { "tag-stack-memory-buildattr", 1); if (T.isARM() || T.isThumb() || T.isAArch64()) { + // Previously 1 was used and meant for the backend to derive the function + // attribute from it. 2 now means function attributes are already set for all + // functions in this module, so there is no need to propagate those from the module + // flag. The value is only used in case of LTO module merge because the backend + // will see all required function attributes set already. The value is used + // before modules got merged. Any positive value means the feature is active + // and required binary markings need to be emitted accordingly.
if (LangOpts.BranchTargetEnforcement) getModule().addModuleFlag(llvm::Module::Min, "branch-target-enforcement", - 1); + 2); if (LangOpts.BranchProtectionPAuthLR) getModule().addModuleFlag(llvm::Module::Min, "branch-protection-pauth-lr", - 1); + 2); if (LangOpts.GuardedControlStack) - getModule().addModuleFlag(llvm::Module::Min, "guarded-control-stack", 1); + getModule().addModuleFlag(llvm::Module::Min, "guarded-control-stack", 2); if (LangOpts.hasSignReturnAddress()) - getModule().addModuleFlag(llvm::Module::Min, "sign-return-address", 1); + getModule().addModuleFlag(llvm::Module::Min, "sign-return-address", 2); if (LangOpts.isSignReturnAddressScopeAll()) getModule().addModuleFlag(llvm::Module::Min, "sign-return-address-all", - 1); + 2); if (!LangOpts.isSignReturnAddressWithAKey()) getModule().addModuleFlag(llvm::Module::Min, - "sign-return-address-with-bkey", 1); + "sign-return-address-with-bkey", 2); if (LangOpts.PointerAuthELFGOT) getModule().addModuleFlag(llvm::Module::Min, "ptrauth-elf-got", 1); diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 5bd15f5..d2cb751 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -1833,10 +1833,6 @@ void CompilerInvocationBase::GenerateCodeGenArgs(const CodeGenOptions &Opts, serializeSanitizerKinds(Opts.SanitizeAnnotateDebugInfo)) GenerateArg(Consumer, OPT_fsanitize_annotate_debug_info_EQ, Sanitizer); - if (Opts.AllocTokenMax) - GenerateArg(Consumer, OPT_falloc_token_max_EQ, - std::to_string(*Opts.AllocTokenMax)); - if (!Opts.EmitVersionIdentMetadata) GenerateArg(Consumer, OPT_Qn); @@ -2350,15 +2346,6 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, } } - if (const auto *Arg = Args.getLastArg(options::OPT_falloc_token_max_EQ)) { - StringRef S = Arg->getValue(); - uint64_t Value = 0; - if (S.getAsInteger(0, Value)) - Diags.Report(diag::err_drv_invalid_value) << Arg->getAsString(Args) << S; - else - Opts.AllocTokenMax = Value; - } - Opts.EmitVersionIdentMetadata = Args.hasFlag(OPT_Qy, OPT_Qn, true); if (!LangOpts->CUDAIsDevice) @@ -3966,6 +3953,29 @@ void CompilerInvocationBase::GenerateLangArgs(const LangOptions &Opts, if (!Opts.RandstructSeed.empty()) GenerateArg(Consumer, OPT_frandomize_layout_seed_EQ, Opts.RandstructSeed); + + if (Opts.AllocTokenMax) + GenerateArg(Consumer, OPT_falloc_token_max_EQ, + std::to_string(*Opts.AllocTokenMax)); + + if (Opts.AllocTokenMode) { + StringRef S; + switch (*Opts.AllocTokenMode) { + case llvm::AllocTokenMode::Increment: + S = "increment"; + break; + case llvm::AllocTokenMode::Random: + S = "random"; + break; + case llvm::AllocTokenMode::TypeHash: + S = "typehash"; + break; + case llvm::AllocTokenMode::TypeHashPointerSplit: + S = "typehashpointersplit"; + break; + } + GenerateArg(Consumer, OPT_falloc_token_mode_EQ, S); + } } bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args, @@ -4544,6 +4554,23 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args, if (const Arg *A = Args.getLastArg(OPT_frandomize_layout_seed_EQ)) Opts.RandstructSeed = A->getValue(0); + if (const auto *Arg = Args.getLastArg(options::OPT_falloc_token_max_EQ)) { + StringRef S = Arg->getValue(); + uint64_t Value = 0; + if (S.getAsInteger(0, Value)) + Diags.Report(diag::err_drv_invalid_value) << Arg->getAsString(Args) << S; + else + Opts.AllocTokenMax = Value; + } + + if (const auto *Arg = Args.getLastArg(options::OPT_falloc_token_mode_EQ)) { + StringRef S = 
Arg->getValue(); + if (auto Mode = getAllocTokenModeFromString(S)) + Opts.AllocTokenMode = Mode; + else + Diags.Report(diag::err_drv_invalid_value) << Arg->getAsString(Args) << S; + } + // Validate options for HLSL if (Opts.HLSL) { // TODO: Revisit restricting SPIR-V to logical once we've figured out how to diff --git a/clang/lib/Headers/avx512ifmavlintrin.h b/clang/lib/Headers/avx512ifmavlintrin.h index c4449c7..b377c17 100644 --- a/clang/lib/Headers/avx512ifmavlintrin.h +++ b/clang/lib/Headers/avx512ifmavlintrin.h @@ -37,6 +37,7 @@ #endif +#if !(defined(__AVXIFMA__) || defined(__AVX512IFMA__)) #define _mm_madd52hi_epu64(X, Y, Z) \ ((__m128i)__builtin_ia32_vpmadd52huq128((__v2di)(X), (__v2di)(Y), \ (__v2di)(Z))) @@ -52,56 +53,83 @@ #define _mm256_madd52lo_epu64(X, Y, Z) \ ((__m256i)__builtin_ia32_vpmadd52luq256((__v4di)(X), (__v4di)(Y), \ (__v4di)(Z))) +#endif + +#if defined(__AVX512IFMA__) +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_madd52hi_epu64(__m128i __X, __m128i __Y, __m128i __Z) { + return (__m128i)__builtin_ia32_vpmadd52huq128((__v2di)__X, (__v2di)__Y, + (__v2di)__Z); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_madd52hi_epu64(__m256i __X, __m256i __Y, __m256i __Z) { + return (__m256i)__builtin_ia32_vpmadd52huq256((__v4di)__X, (__v4di)__Y, + (__v4di)__Z); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_madd52lo_epu64(__m128i __X, __m128i __Y, __m128i __Z) { + return (__m128i)__builtin_ia32_vpmadd52luq128((__v2di)__X, (__v2di)__Y, + (__v2di)__Z); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_madd52lo_epu64(__m256i __X, __m256i __Y, __m256i __Z) { + return (__m256i)__builtin_ia32_vpmadd52luq256((__v4di)__X, (__v4di)__Y, + (__v4di)__Z); +} +#endif static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_madd52hi_epu64(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_selectq_128( - __M, (__v2di)_mm_madd52hi_epu64(__W, __X, __Y), (__v2di)__W); + __M, (__v2di)__builtin_ia32_vpmadd52huq128(__W, __X, __Y), (__v2di)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_madd52hi_epu64(__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) { return (__m128i)__builtin_ia32_selectq_128( - __M, (__v2di)_mm_madd52hi_epu64(__X, __Y, __Z), + __M, (__v2di)__builtin_ia32_vpmadd52huq128(__X, __Y, __Z), (__v2di)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_madd52hi_epu64( __m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_selectq_256( - __M, (__v4di)_mm256_madd52hi_epu64(__W, __X, __Y), (__v4di)__W); + __M, (__v4di)__builtin_ia32_vpmadd52huq256(__W, __X, __Y), (__v4di)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_madd52hi_epu64( __mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) { return (__m256i)__builtin_ia32_selectq_256( - __M, (__v4di)_mm256_madd52hi_epu64(__X, __Y, __Z), + __M, (__v4di)__builtin_ia32_vpmadd52huq256(__X, __Y, __Z), (__v4di)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_madd52lo_epu64(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) { return (__m128i)__builtin_ia32_selectq_128( - __M, (__v2di)_mm_madd52lo_epu64(__W, __X, __Y), (__v2di)__W); + __M, (__v2di)__builtin_ia32_vpmadd52luq128(__W, __X, __Y), (__v2di)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_madd52lo_epu64(__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) { return (__m128i)__builtin_ia32_selectq_128( - __M, (__v2di)_mm_madd52lo_epu64(__X, __Y, __Z), + __M, 
(__v2di)__builtin_ia32_vpmadd52luq128(__X, __Y, __Z), (__v2di)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_madd52lo_epu64( __m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) { return (__m256i)__builtin_ia32_selectq_256( - __M, (__v4di)_mm256_madd52lo_epu64(__W, __X, __Y), (__v4di)__W); + __M, (__v4di)__builtin_ia32_vpmadd52luq256(__W, __X, __Y), (__v4di)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_madd52lo_epu64( __mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) { return (__m256i)__builtin_ia32_selectq_256( - __M, (__v4di)_mm256_madd52lo_epu64(__X, __Y, __Z), + __M, (__v4di)__builtin_ia32_vpmadd52luq256(__X, __Y, __Z), (__v4di)_mm256_setzero_si256()); } diff --git a/clang/lib/Headers/avxifmaintrin.h b/clang/lib/Headers/avxifmaintrin.h index a2ef601..e452d5f 100644 --- a/clang/lib/Headers/avxifmaintrin.h +++ b/clang/lib/Headers/avxifmaintrin.h @@ -31,6 +31,13 @@ __min_vector_width__(256))) #endif +#if !defined(__AVX512IFMA__) && defined(__AVXIFMA__) +#define _mm_madd52hi_epu64(X, Y, Z) _mm_madd52hi_avx_epu64(X, Y, Z) +#define _mm_madd52lo_epu64(X, Y, Z) _mm_madd52lo_avx_epu64(X, Y, Z) +#define _mm256_madd52hi_epu64(X, Y, Z) _mm256_madd52hi_avx_epu64(X, Y, Z) +#define _mm256_madd52lo_epu64(X, Y, Z) _mm256_madd52lo_avx_epu64(X, Y, Z) +#endif + // must vex-encoding /// Multiply packed unsigned 52-bit integers in each 64-bit element of \a __Y diff --git a/clang/lib/Sema/HeuristicResolver.cpp b/clang/lib/Sema/HeuristicResolver.cpp index cbdefaa..056e133 100644 --- a/clang/lib/Sema/HeuristicResolver.cpp +++ b/clang/lib/Sema/HeuristicResolver.cpp @@ -450,7 +450,12 @@ QualType HeuristicResolverImpl::resolveExprToType(const Expr *E) { if (const auto *CE = dyn_cast<CallExpr>(E)) { if (QualType Resolved = resolveTypeOfCallExpr(CE); !Resolved.isNull()) return Resolved; + + // Don't proceed to try resolveExprToDecls(), it would just call + // resolveTypeOfCallExpr() again. + return E->getType(); } + // Similarly, unwrapping a unary dereference operation does not work via // resolveExprToDecls. if (const auto *UO = dyn_cast<UnaryOperator>(E->IgnoreParenCasts())) { diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp index 3bb8080..ee9b2b3 100644 --- a/clang/lib/Sema/SemaOpenACC.cpp +++ b/clang/lib/Sema/SemaOpenACC.cpp @@ -2999,11 +2999,11 @@ bool SemaOpenACC::CreateReductionCombinerRecipe( BinOp = BinaryOperatorKind::BO_LT; break; case OpenACCReductionOperator::And: + BinOp = BinaryOperatorKind::BO_LAnd; + break; case OpenACCReductionOperator::Or: - // We just want a 'NYI' error in the backend, so leave an empty combiner - // recipe, and claim success. - CombinerRecipes.push_back({nullptr, nullptr, nullptr}); - return false; + BinOp = BinaryOperatorKind::BO_LOr; + break; } // If VarTy is an array type, at the top level only, we want to do our @@ -3068,8 +3068,21 @@ bool SemaOpenACC::CreateReductionCombinerRecipe( : CombinerFailureKind::Assignment}; } case OpenACCReductionOperator::And: - case OpenACCReductionOperator::Or: - llvm_unreachable("And/Or not implemented, but should fail earlier"); + case OpenACCReductionOperator::Or: { + // These are done as LHS = LHS && RHS (or LHS = LHS || RHS). So after the + // binop, all we have to do is the assignment. + if (!BinOpRes.isUsable()) + return {BinOpRes, CombinerFailureKind::BinOp}; + + // Build assignment. 
+ ExprResult Assignment = SemaRef.BuildBinOp(SemaRef.getCurScope(), Loc, + BinaryOperatorKind::BO_Assign, + LHSDRE, BinOpRes.get(), + /*ForFoldExpr=*/false); + return {Assignment, Assignment.isUsable() + ? CombinerFailureKind::None + : CombinerFailureKind::Assignment}; + } case OpenACCReductionOperator::Invalid: llvm_unreachable("Invalid should have been caught above"); } diff --git a/clang/test/AST/ByteCode/bitfields.cpp b/clang/test/AST/ByteCode/bitfields.cpp index df8d567..a583282 100644 --- a/clang/test/AST/ByteCode/bitfields.cpp +++ b/clang/test/AST/ByteCode/bitfields.cpp @@ -128,3 +128,82 @@ namespace NonConstBitWidth { // both-note {{read of non-const variable}} }; } + +namespace IncDecOverflow { + constexpr bool test1() { + struct {unsigned u: 5; } a {}; + a.u--; + return a.u == 31; + } + static_assert(test1(), ""); + + constexpr bool test2() { + struct {unsigned u: 5; } a {}; + --a.u; + return a.u == 31; + } + static_assert(test2(), ""); + + constexpr bool test3() { + int x = 0; + struct {unsigned u: 5; } a {}; + x = a.u--; + return a.u == 31; + } + static_assert(test3(), ""); + + constexpr bool test4() { + int x = 0; + struct {unsigned u: 5; } a {}; + x = --a.u; + return a.u == 31; + } + static_assert(test4(), ""); + + constexpr bool test5() { + struct {unsigned u: 5; } a {}; + a.u = 31; + ++a.u; + + return a.u == 0; + } + static_assert(test5(), ""); + + constexpr bool test6() { + struct {unsigned u: 5; } a {}; + a.u = 31; + ++a.u; + + return a.u == 0; + } + static_assert(test6(), ""); + + constexpr bool test7() { + struct {unsigned u: 5; } a {}; + a.u = 31; + a.u++; + + return a.u == 0; + } + static_assert(test7(), ""); + + constexpr bool test8() { + int x = 0; + struct {unsigned u: 5; } a {}; + a.u = 31; + x = a.u++; + + return a.u == 0; + } + static_assert(test8(), ""); + + constexpr bool test9() { + int x = 0; + struct {unsigned u: 5; } a {}; + a.u = 31; + x = ++a.u; + + return a.u == 0; + } + static_assert(test9(), ""); +} diff --git a/clang/test/CIR/CodeGen/struct-init.cpp b/clang/test/CIR/CodeGen/struct-init.cpp index a47ef53..cb50999 100644 --- a/clang/test/CIR/CodeGen/struct-init.cpp +++ b/clang/test/CIR/CodeGen/struct-init.cpp @@ -9,6 +9,22 @@ struct S { int a, b, c; }; +S partial_init = { 1 }; + +// CIR: cir.global external @partial_init = #cir.const_record<{#cir.int<1> : !s32i, #cir.int<0> : !s32i, #cir.int<0> : !s32i}> : !rec_S +// LLVM: @partial_init = global %struct.S { i32 1, i32 0, i32 0 } +// OGCG: @partial_init = global %struct.S { i32 1, i32 0, i32 0 } + +struct StructWithDefaultInit { + int a = 2; +}; + +StructWithDefaultInit swdi = {}; + +// CIR: cir.global external @swdi = #cir.const_record<{#cir.int<2> : !s32i}> : !rec_StructWithDefaultInit +// LLVM: @swdi = global %struct.StructWithDefaultInit { i32 2 }, align 4 +// OGCG: @swdi = global %struct.StructWithDefaultInit { i32 2 }, align 4 + void init() { S s1 = {1, 2, 3}; S s2 = {4, 5}; diff --git a/clang/test/CIR/CodeGenOpenACC/atomic-read.cpp b/clang/test/CIR/CodeGenOpenACC/atomic-read.cpp new file mode 100644 index 0000000..9882f05 --- /dev/null +++ b/clang/test/CIR/CodeGenOpenACC/atomic-read.cpp @@ -0,0 +1,24 @@ +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s + +void use(int x, unsigned int y, float f) { + // CHECK: cir.func{{.*}}(%[[X_ARG:.*]]: !s32i{{.*}}, %[[Y_ARG:.*]]: !u32i{{.*}}, %[[F_ARG:.*]]: !cir.float{{.*}}){{.*}}{ + // CHECK-NEXT: %[[X_ALLOC:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, 
["x", init] + // CHECK-NEXT: %[[Y_ALLOC:.*]] = cir.alloca !u32i, !cir.ptr<!u32i>, ["y", init] + // CHECK-NEXT: %[[F_ALLOC:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["f", init] + // CHECK-NEXT: cir.store %[[X_ARG]], %[[X_ALLOC]] : !s32i, !cir.ptr<!s32i> + // CHECK-NEXT: cir.store %[[Y_ARG]], %[[Y_ALLOC]] : !u32i, !cir.ptr<!u32i> + // CHECK-NEXT: cir.store %[[F_ARG]], %[[F_ALLOC]] : !cir.float, !cir.ptr<!cir.float> + + // CHECK-NEXT: acc.atomic.read %[[X_ALLOC]] = %[[Y_ALLOC]] : !cir.ptr<!s32i>, !cir.ptr<!u32i>, !s32i +#pragma acc atomic read + x = y; + + // CHECK-NEXT: %[[X_LOAD:.*]] = cir.load{{.*}} %[[X_ALLOC]] : !cir.ptr<!s32i>, !s32i + // CHECK-NEXT: %[[X_CAST:.*]] = cir.cast integral %[[X_LOAD]] : !s32i -> !u32i + // CHECK-NEXT: %[[Y_LOAD:.*]] = cir.load{{.*}} %[[Y_ALLOC]] : !cir.ptr<!u32i>, !u32i + // CHECK-NEXT: %[[CMP:.*]] = cir.cmp(eq, %[[X_CAST]], %[[Y_LOAD]]) : !u32i, !cir.bool + // CHECK-NEXT: %[[CMP_CAST:.*]] = builtin.unrealized_conversion_cast %[[CMP]] : !cir.bool to i1 + // CHECK-NEXT: acc.atomic.read if(%[[CMP_CAST]]) %[[F_ALLOC]] = %[[Y_ALLOC]] : !cir.ptr<!cir.float>, !cir.ptr<!u32i>, !cir.float +#pragma acc atomic read if (x == y) + f = y; +} diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp index c1c2e4b..53eba7b 100644 --- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct DefaultOperators { int i; @@ -480,7 +480,77 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: 
%[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = 
cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators> // CHECK-NEXT: } for(int i = 0; i < 5; ++i); @@ -507,7 +577,77 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: 
%[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators> // CHECK-NEXT: } for(int i = 0; i < 5; ++i); @@ -1532,7 +1672,101 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } 
body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators> +// +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : 
!cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> // CHECK-NEXT: } for(int i = 0; i < 5; ++i); @@ -1576,7 +1810,101 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// 
CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators> +// +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] 
= cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> // CHECK-NEXT: } for(int i = 0; i < 5; ++i); @@ -2398,6 +2726,104 @@ void acc_combined() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: 
!cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators> +// +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// 
CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// 
CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> // CHECK-NEXT: } for(int i = 0; i < 5; ++i); @@ -2446,6 +2872,104 @@ void acc_combined() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators> +// +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = 
cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: 
%[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true {
+// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true
+// CHECK-NEXT: cir.yield %[[TRUE]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
 // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
 // CHECK-NEXT: }
 for(int i = 0; i < 5; ++i);
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp
index 853f345..63d6952 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp
@@ -1,4 +1,4 @@
-// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
 template<typename T>
 void acc_combined() {
   T someVar;
@@ -92,7 +92,18 @@ void acc_combined() {
 // CHECK-NEXT: acc.yield
 // CHECK-NEXT: } combiner {
 // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true {
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool
+// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false
+// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !cir.float, !cir.ptr<!cir.float>
 // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
 // CHECK-NEXT: }
 for(int i=0;i < 5; ++i);
@@ -106,7 +117,18 @@ void acc_combined() {
 //
 // CHECK-NEXT: } combiner {
 // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true {
+// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true
+// CHECK-NEXT: cir.yield %[[TRUE]] : 
!cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !cir.float, !cir.ptr<!cir.float> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -371,7 +393,41 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -401,7 +457,41 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: 
!cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -708,7 +798,45 @@ void acc_combined() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store 
%[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -744,7 +872,45 @@ void acc_combined() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : 
!cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true {
+// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true
+// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool
+// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
 // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
 // CHECK-NEXT: }
 for(int i=0;i < 5; ++i);
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp
index 67e8460..9c1b161 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp
@@ -1,4 +1,4 @@
-// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
 
 struct HasOperatorsInline {
   int i;
@@ -14,8 +14,8 @@ struct HasOperatorsInline {
   HasOperatorsInline &operator&=(HasOperatorsInline& other);
   HasOperatorsInline &operator|=(HasOperatorsInline& other);
   HasOperatorsInline &operator^=(HasOperatorsInline& other);
-  bool &operator&&(HasOperatorsInline& other);
-  bool &operator||(HasOperatorsInline& other);
+  HasOperatorsInline &operator&&(HasOperatorsInline& other);
+  HasOperatorsInline &operator||(HasOperatorsInline& other);
   // For min/max
   bool operator<(HasOperatorsInline& other);
   HasOperatorsInline &operator=(HasOperatorsInline& other);
@@ -277,7 +277,8 @@ void acc_combined() {
 //
 // CHECK-NEXT: } combiner {
 // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[OP_RES:.*]] = cir.call 
@_ZN18HasOperatorsInlineaaERS_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHSARG]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}): @@ -286,7 +287,7 @@ void acc_combined() { // CHECK-NEXT: } for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(||:someVar) -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <lor> init { +// CHECK: acc.reduction.recipe @reduction_lor__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <lor> init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i> @@ -308,7 +309,8 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineooERS_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHSARG]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}): @@ -318,7 +320,7 @@ void acc_combined() { for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(+:someVarArr) -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <add> init { +// CHECK: acc.reduction.recipe @reduction_add__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <add> init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"] @@ -1254,7 +1256,31 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store 
%[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineaaERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}): @@ -1318,7 +1344,31 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// 
CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineooERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}): @@ -2121,6 +2171,35 @@ void acc_combined() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineaaERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = 
cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): @@ -2198,6 +2277,35 @@ void acc_combined() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineooERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): diff --git 
a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp index d74de82..78b43dd 100644 --- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp +++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s template<typename T> void acc_combined() { @@ -145,7 +145,18 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !s32i, !cir.ptr<!s32i> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -159,7 +170,18 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !s32i, !cir.ptr<!s32i> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -587,7 +609,41 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load 
%[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -617,7 +673,41 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> +// +// 
CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -1116,7 +1206,45 @@ void acc_combined() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// 
CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -1152,7 +1280,45 @@ void acc_combined() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// 
CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> // CHECK-NEXT: } for(int i=0;i < 5; ++i); diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp index a6df6c0..5b37071 100644 --- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct HasOperatorsOutline { int i; unsigned u; @@ -15,8 +15,8 @@ HasOperatorsOutline &operator*=(HasOperatorsOutline &, HasOperatorsOutline &); HasOperatorsOutline &operator&=(HasOperatorsOutline &, HasOperatorsOutline &); HasOperatorsOutline &operator|=(HasOperatorsOutline &, HasOperatorsOutline &); HasOperatorsOutline &operator^=(HasOperatorsOutline &, HasOperatorsOutline &); -bool &operator&&(HasOperatorsOutline &, HasOperatorsOutline &); -bool &operator||(HasOperatorsOutline &, HasOperatorsOutline &); +HasOperatorsOutline &operator&&(HasOperatorsOutline &, HasOperatorsOutline &); +HasOperatorsOutline &operator||(HasOperatorsOutline &, HasOperatorsOutline &); // For min/max bool operator<(HasOperatorsOutline &, HasOperatorsOutline &); @@ -276,7 +276,8 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZaaR19HasOperatorsOutlineS0_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHSARG]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}): @@ -285,7 +286,7 @@ void acc_combined() { // CHECK-NEXT: } for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(||:someVar) -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <lor> init { +// CHECK: acc.reduction.recipe @reduction_lor__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <lor> init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i> @@ -307,7 +308,8 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: 
!cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZooR19HasOperatorsOutlineS0_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHSARG]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}): @@ -317,7 +319,7 @@ void acc_combined() { for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(+:someVarArr) -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <add> init { +// CHECK: acc.reduction.recipe @reduction_add__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <add> init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"] @@ -1253,7 +1255,31 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZaaR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[OP_RES]]) : 
(!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}): @@ -1317,7 +1343,31 @@ void acc_combined() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZooR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}): @@ -2120,6 +2170,35 @@ void acc_combined() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: 
!cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZaaR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): @@ -2197,6 +2276,35 @@ void acc_combined() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] 
{alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZooR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c index d65d5d4..6ec1c43 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -std=c23 -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -std=c23 -triple x86_64-linux-pc %s -o - | FileCheck %s struct DefaultOperators { int i; @@ -485,7 +485,80 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: 
%[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast integral %[[RES_TO_INT]] : !s32i -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = 
cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators> // CHECK-NEXT: } ; @@ -512,7 +585,80 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> 
!cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast integral %[[RES_TO_INT]] : !s32i -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} 
%[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators> // CHECK-NEXT: } ; @@ -1516,7 +1662,104 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators> +// +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> 
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast integral %[[RES_TO_INT]] : !s32i -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> 
!cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> // CHECK-NEXT: } ; @@ -1546,7 +1789,104 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators> +// +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// 
CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast integral %[[RES_TO_INT]] : !s32i -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : 
!cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> // CHECK-NEXT: } ; @@ -2376,6 +2716,107 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> 
!cir.ptr<!rec_DefaultOperators> +// +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast integral %[[RES_TO_INT]] : !s32i -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, 
!cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> // CHECK-NEXT: } ; @@ -2424,6 +2865,107 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : 
!cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators> +// +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast integral %[[RES_TO_INT]] : !s32i -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, 
!cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> // CHECK-NEXT: } ; diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp 
b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp index f32fa2d..7bd6f67 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct DefaultOperators { int i; @@ -480,7 +480,77 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// 
CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators> // CHECK-NEXT: } ; @@ -507,7 +577,77 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// 
CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, 
!cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators> // CHECK-NEXT: } ; @@ -1532,7 +1672,101 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators> +// +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: 
%[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: 
}, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> // CHECK-NEXT: } ; @@ -1576,7 +1810,101 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators> +// +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : 
!cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = 
cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> // CHECK-NEXT: } ; @@ -2398,6 +2726,104 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : 
!cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators> +// +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], 
%[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> // CHECK-NEXT: } ; @@ -2446,6 +2872,104 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, 
%[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators> +// +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = 
cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> // CHECK-NEXT: } ; diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c index 9f73367..13c335b 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple 
x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s void acc_compute() { float someVar; @@ -92,7 +92,19 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !cir.float, !cir.ptr<!cir.float> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float> // CHECK-NEXT: } ; @@ -106,7 +118,19 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !cir.float, !cir.ptr<!cir.float> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float> // CHECK-NEXT: } ; @@ -371,7 +395,42 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, 
!cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> // CHECK-NEXT: } ; @@ -401,7 +460,42 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> 
!cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> // CHECK-NEXT: } ; @@ -708,7 +802,46 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = 
cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> // CHECK-NEXT: } ; @@ -744,7 +877,46 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield 
%[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast int_to_float %[[RES_TO_INT]] : !s32i -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> // CHECK-NEXT: } ; diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp index ffd2631..6737821 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s template<typename T> void acc_compute() { @@ -93,7 +93,18 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !cir.float, !cir.ptr<!cir.float> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float> // CHECK-NEXT: } ; @@ -107,7 +118,18 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : 
!cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !cir.float, !cir.ptr<!cir.float> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float> // CHECK-NEXT: } ; @@ -372,7 +394,41 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> // CHECK-NEXT: } ; @@ -402,7 +458,41 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: 
cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> // CHECK-NEXT: } ; @@ -709,7 +799,45 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load 
%[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> // CHECK-NEXT: } ; @@ -745,7 +873,45 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : 
!cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> // CHECK-NEXT: } ; diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp index 1e367ee..262fe98 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct HasOperatorsInline { int i; @@ -14,8 +14,8 @@ struct HasOperatorsInline { HasOperatorsInline &operator&=(HasOperatorsInline& other); HasOperatorsInline &operator|=(HasOperatorsInline& other); HasOperatorsInline &operator^=(HasOperatorsInline& other); - bool &operator&&(HasOperatorsInline& other); - bool &operator||(HasOperatorsInline& other); + HasOperatorsInline &operator&&(HasOperatorsInline& other); + HasOperatorsInline &operator||(HasOperatorsInline& other); // For min/max bool operator<(HasOperatorsInline& other); HasOperatorsInline &operator=(HasOperatorsInline& other); @@ -277,7 +277,8 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineaaERS_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHSARG]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline> // CHECK-NEXT: } 
destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}): @@ -286,7 +287,7 @@ void acc_compute() { // CHECK-NEXT: } ; #pragma acc parallel reduction(||:someVar) -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <lor> init { +// CHECK: acc.reduction.recipe @reduction_lor__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <lor> init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i> @@ -308,7 +309,8 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineooERS_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHSARG]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}): @@ -318,7 +320,7 @@ void acc_compute() { ; #pragma acc parallel reduction(+:someVarArr) -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <add> init { +// CHECK: acc.reduction.recipe @reduction_add__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <add> init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"] @@ -1254,7 +1256,31 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast 
array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineaaERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}): @@ -1318,7 +1344,31 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineooERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, 
!cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}): @@ -2121,6 +2171,35 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineaaERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: 
!cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): @@ -2198,6 +2277,35 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineooERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c index 2f42a5c..be7b123 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 
-fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s void acc_compute() { int someVar; @@ -144,7 +144,18 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !s32i, !cir.ptr<!s32i> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i> // CHECK-NEXT: } ; @@ -158,7 +169,18 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !s32i, !cir.ptr<!s32i> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i> // CHECK-NEXT: } ; @@ -586,7 +608,41 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, 
!s64i) -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> // CHECK-NEXT: } ; @@ -616,7 +672,41 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield 
%[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> // CHECK-NEXT: } ; @@ -1115,7 +1205,45 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, 
!cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> // CHECK-NEXT: } ; @@ -1151,7 +1279,45 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> // CHECK-NEXT: } ; diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp index af7bcf3..fb6984f 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 
-fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s template<typename T> void acc_compute() { @@ -145,7 +145,18 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !s32i, !cir.ptr<!s32i> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i> // CHECK-NEXT: } ; @@ -159,7 +170,18 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !s32i, !cir.ptr<!s32i> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i> // CHECK-NEXT: } ; @@ -587,7 +609,41 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay 
%[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> // CHECK-NEXT: } ; @@ -617,7 +673,41 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load 
{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> // CHECK-NEXT: } ; @@ -1116,7 +1206,45 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] 
: !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> // CHECK-NEXT: } ; @@ -1152,7 +1280,45 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> // CHECK-NEXT: } ; diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp index ec890e2..3a80ed5 100644 --- 
a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct HasOperatorsOutline { int i; unsigned u; @@ -15,8 +15,8 @@ HasOperatorsOutline &operator*=(HasOperatorsOutline &, HasOperatorsOutline &); HasOperatorsOutline &operator&=(HasOperatorsOutline &, HasOperatorsOutline &); HasOperatorsOutline &operator|=(HasOperatorsOutline &, HasOperatorsOutline &); HasOperatorsOutline &operator^=(HasOperatorsOutline &, HasOperatorsOutline &); -bool &operator&&(HasOperatorsOutline &, HasOperatorsOutline &); -bool &operator||(HasOperatorsOutline &, HasOperatorsOutline &); +HasOperatorsOutline &operator&&(HasOperatorsOutline &, HasOperatorsOutline &); +HasOperatorsOutline &operator||(HasOperatorsOutline &, HasOperatorsOutline &); // For min/max bool operator<(HasOperatorsOutline &, HasOperatorsOutline &); @@ -276,7 +276,8 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZaaR19HasOperatorsOutlineS0_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHSARG]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}): @@ -285,7 +286,7 @@ void acc_compute() { // CHECK-NEXT: } ; #pragma acc parallel reduction(||:someVar) -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <lor> init { +// CHECK: acc.reduction.recipe @reduction_lor__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <lor> init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i> @@ -307,7 +308,8 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZooR19HasOperatorsOutlineS0_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHSARG]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline // 
CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}): @@ -317,7 +319,7 @@ void acc_compute() { ; #pragma acc parallel reduction(+:someVarArr) -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <add> init { +// CHECK: acc.reduction.recipe @reduction_add__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <add> init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"] @@ -1253,7 +1255,31 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZaaR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: 
!cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}): @@ -1317,7 +1343,31 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZooR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}): @@ -2120,6 +2170,35 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : 
i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZaaR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): @@ -2197,6 +2276,35 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> 
!cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZooR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c index 08daa70..9b10a29 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s void acc_compute() { unsigned int someVar; @@ -143,7 +143,19 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!u32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!u32i> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_SINT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast integral %[[RES_TO_SINT]] : !s32i -> !u32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !u32i, !cir.ptr<!u32i> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!u32i> // CHECK-NEXT: } ; @@ 
-157,7 +169,19 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!u32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!u32i> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_SINT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast integral %[[RES_TO_SINT]] : !s32i -> !u32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !u32i, !cir.ptr<!u32i> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!u32i> // CHECK-NEXT: } ; @@ -585,7 +609,42 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!u32i>, !s64i) -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!u32i>, !s64i) -> !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_SINT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast integral %[[RES_TO_SINT]] : !s32i -> !u32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !u32i, 
!cir.ptr<!u32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> // CHECK-NEXT: } ; @@ -615,7 +674,42 @@ void acc_compute() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!u32i>, !s64i) -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!u32i>, !s64i) -> !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_SINT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast integral %[[RES_TO_SINT]] : !s32i -> !u32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> // CHECK-NEXT: } ; @@ -1114,7 +1208,46 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// 
CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!u32i>, !u64i) -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!u32i>, !u64i) -> !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_SINT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast integral %[[RES_TO_SINT]] : !s32i -> !u32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> // CHECK-NEXT: } ; @@ -1150,7 +1283,46 @@ void acc_compute() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, 
!cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!u32i>, !u64i) -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!u32i>, !u64i) -> !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_SINT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast integral %[[RES_TO_SINT]] : !s32i -> !u32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> // CHECK-NEXT: } ; diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp index 1a77c0f..11ebd7b 100644 --- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct DefaultOperators { int i; @@ -480,7 +480,77 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHSARG]][0] {name = "i"} : 
!cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast 
float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators> // CHECK-NEXT: } for(int i = 0; i < 5; ++i); @@ -507,7 +577,77 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true 
{ +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: 
cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators> // CHECK-NEXT: } for(int i = 0; i < 5; ++i); @@ -1532,7 +1672,101 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators> +// +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : 
!cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// 
CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> // CHECK-NEXT: } for(int i = 0; i < 5; ++i); @@ -1576,7 +1810,101 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators> +// +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: 
%[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = 
cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> // CHECK-NEXT: } for(int i = 0; i < 5; ++i); @@ -2398,6 +2726,104 @@ void acc_loop() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators> +// +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: 
%[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: 
%[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> // CHECK-NEXT: } for(int i = 0; i < 5; ++i); @@ -2446,6 +2872,104 @@ void acc_loop() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators> +// +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = 
cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_GET_U:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[RHS_GET_U:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_U]] : !cir.ptr<!u32i>, !u32i +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !u32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !u32i +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_U]] : !u32i, !cir.ptr<!u32i> +// +// CHECK-NEXT: %[[LHS_GET_F:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_GET_F:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_F]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_F]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_GET_D:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[RHS_GET_D:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_D]] : !cir.ptr<!cir.double>, 
!cir.double +// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_D]] : !cir.ptr<!cir.double>, !cir.double +// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.double -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[RES_TO_VAL]], %[[LHS_GET_D]] : !cir.double, !cir.ptr<!cir.double> +// +// CHECK-NEXT: %[[LHS_GET_B:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[RHS_GET_B:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_LOAD]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_B]] : !cir.ptr<!cir.bool>, !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_LOAD]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: cir.store {{.*}} %[[TERNARY]], %[[LHS_GET_B]] : !cir.bool, !cir.ptr<!cir.bool> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> // CHECK-NEXT: } for(int i = 0; i < 5; ++i); diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp index 7faef71..57cc1af 100644 --- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp +++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s template<typename T> void acc_loop() { @@ -93,7 +93,18 @@ void acc_loop() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool 
+// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !cir.float, !cir.ptr<!cir.float> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -107,7 +118,18 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !cir.float, !cir.ptr<!cir.float> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -372,7 +394,41 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast 
float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -402,7 +458,41 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> 
+// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -709,7 +799,45 @@ void acc_loop() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -745,7 +873,45 @@ void acc_loop() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> 
{{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[LHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast float_to_bool %[[RHS_LOAD]] : !cir.float -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_float %[[TERNARY]] : !cir.bool -> !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> // CHECK-NEXT: } for(int i=0;i < 5; ++i); diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp index 43c9fbbc..8a5bf3e 100644 --- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir 
-fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct HasOperatorsInline { int i; @@ -14,8 +14,8 @@ struct HasOperatorsInline { HasOperatorsInline &operator&=(HasOperatorsInline& other); HasOperatorsInline &operator|=(HasOperatorsInline& other); HasOperatorsInline &operator^=(HasOperatorsInline& other); - bool &operator&&(HasOperatorsInline& other); - bool &operator||(HasOperatorsInline& other); + HasOperatorsInline &operator&&(HasOperatorsInline& other); + HasOperatorsInline &operator||(HasOperatorsInline& other); // For min/max bool operator<(HasOperatorsInline& other); HasOperatorsInline &operator=(HasOperatorsInline& other); @@ -277,7 +277,8 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineaaERS_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHSARG]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}): @@ -286,7 +287,7 @@ void acc_loop() { // CHECK-NEXT: } for(int i=0;i < 5; ++i); #pragma acc loop reduction(||:someVar) -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <lor> init { +// CHECK: acc.reduction.recipe @reduction_lor__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <lor> init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i> @@ -308,7 +309,8 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineooERS_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHSARG]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}): @@ -318,7 +320,7 @@ void acc_loop() { for(int i=0;i < 5; ++i); #pragma acc loop reduction(+:someVarArr) -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> reduction_operator <add> init { +// CHECK: acc.reduction.recipe @reduction_add__ZTSA5_18HasOperatorsInline : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> 
reduction_operator <add> init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsInline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsInline>>, ["arrayinit.temp"] @@ -1254,7 +1256,31 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineaaERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}): @@ -1318,7 +1344,31 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// 
CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineooERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}): @@ -2121,6 +2171,35 @@ void acc_loop() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : 
(!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineaaERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): @@ -2198,6 +2277,35 @@ void acc_loop() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZN18HasOperatorsInlineooERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> 
!cir.ptr<!rec_HasOperatorsInline> +// CHECK-NEXT: @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp index 5353218..f60dff9 100644 --- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp +++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s template<typename T> void acc_loop() { @@ -145,7 +145,18 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !s32i, !cir.ptr<!s32i> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -159,7 +170,18 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : 
(!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHSARG]] : !s32i, !cir.ptr<!s32i> // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -587,7 +609,41 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -617,7 +673,41 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load 
%[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -1116,7 +1206,45 @@ void acc_loop() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride 
%[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[FALSE:.*]] = cir.const #false +// CHECK-NEXT: cir.yield %[[FALSE]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> // CHECK-NEXT: } for(int i=0;i < 5; ++i); @@ -1152,7 +1280,45 @@ void acc_loop() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i> +// +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_TO_BOOL:.*]] = cir.cast int_to_bool 
%[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_TO_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_TO_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_TO_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_VAL:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_VAL]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i> +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> // CHECK-NEXT: } for(int i=0;i < 5; ++i); diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp index e193cfa..8613bc8a 100644 --- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct HasOperatorsOutline { int i; unsigned u; @@ -15,8 +15,8 @@ HasOperatorsOutline &operator*=(HasOperatorsOutline &, HasOperatorsOutline &); HasOperatorsOutline &operator&=(HasOperatorsOutline &, HasOperatorsOutline &); HasOperatorsOutline &operator|=(HasOperatorsOutline &, HasOperatorsOutline &); HasOperatorsOutline &operator^=(HasOperatorsOutline &, HasOperatorsOutline &); -bool &operator&&(HasOperatorsOutline &, HasOperatorsOutline &); -bool &operator||(HasOperatorsOutline &, HasOperatorsOutline &); +HasOperatorsOutline &operator&&(HasOperatorsOutline &, HasOperatorsOutline &); +HasOperatorsOutline &operator||(HasOperatorsOutline &, HasOperatorsOutline &); // For min/max bool operator<(HasOperatorsOutline &, HasOperatorsOutline &); @@ -276,7 +276,8 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZaaR19HasOperatorsOutlineS0_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHSARG]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}): @@ -285,7 +286,7 @@ void acc_loop() { // CHECK-NEXT: } for(int i=0;i < 5; ++i); #pragma 
acc loop reduction(||:someVar) -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <lor> init { +// CHECK: acc.reduction.recipe @reduction_lor__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <lor> init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i> @@ -307,7 +308,8 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZooR19HasOperatorsOutlineS0_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHSARG]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}): @@ -317,7 +319,7 @@ void acc_loop() { for(int i=0;i < 5; ++i); #pragma acc loop reduction(+:someVarArr) -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <add> init { +// CHECK: acc.reduction.recipe @reduction_add__ZTSA5_19HasOperatorsOutline : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> reduction_operator <add> init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array<!rec_HasOperatorsOutline x 5>, !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!cir.ptr<!rec_HasOperatorsOutline>>, ["arrayinit.temp"] @@ -1253,7 +1255,31 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride 
%[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZaaR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}): @@ -1317,7 +1343,31 @@ void acc_loop() { // // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}) -// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZooR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: 
%[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}): @@ -2120,6 +2170,35 @@ void acc_loop() { // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZaaR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): @@ -2197,6 +2276,35 @@ void acc_loop() { // 
CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { // CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}})) +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline> +// +// CHECK-NEXT: %[[OP_RES:.*]] = cir.call @_ZooR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline> +// CHECK-NEXT: @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[OP_RES]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}): diff --git a/clang/test/CIR/CodeGenOpenACC/reduction-clause-recipes.cpp b/clang/test/CIR/CodeGenOpenACC/reduction-clause-recipes.cpp index 20ad7a3..29f1b5f 100644 --- a/clang/test/CIR/CodeGenOpenACC/reduction-clause-recipes.cpp +++ b/clang/test/CIR/CodeGenOpenACC/reduction-clause-recipes.cpp @@ -1,4 +1,4 @@ -// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | 
FileCheck %s // Note: unlike the 'private' recipe checks, this is just for spot-checking, // so this test isn't as comprehensive. The same code paths are used for @@ -753,8 +753,97 @@ void do_things(unsigned A, unsigned B) { // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): -// CHECK-NEXT: acc.yield +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_TLA_LOAD:.*]] = cir.load %[[LHSARG]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>>, !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>> +// CHECK-NEXT: %[[LHS_BOUND3_STRIDE:.*]] = cir.ptr_stride %[[LHS_TLA_LOAD]], %[[ITR3_LOAD]] : (!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>, !u64i) -> !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>> +// CHECK-NEXT: %[[RHS_TLA_LOAD:.*]] = cir.load %[[RHSARG]] : !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>>, !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>> +// CHECK-NEXT: %[[RHS_BOUND3_STRIDE:.*]] = cir.ptr_stride %[[RHS_TLA_LOAD]], %[[ITR3_LOAD]] : (!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>, !u64i) -> !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i +// 
CHECK-NEXT: %[[LHS_BOUND3_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHS_BOUND3_STRIDE]] : !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>> -> !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: %[[LHS_BOUND2_STRIDE:.*]] = cir.ptr_stride %[[LHS_BOUND3_STRIDE_DECAY]], %[[ITR2_LOAD]] : (!cir.ptr<!cir.ptr<!rec_NoOps>>, !u64i) -> !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: %[[RHS_BOUND3_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHS_BOUND3_STRIDE]] : !cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>> -> !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: %[[RHS_BOUND2_STRIDE:.*]] = cir.ptr_stride %[[RHS_BOUND3_STRIDE_DECAY]], %[[ITR2_LOAD]] : (!cir.ptr<!cir.ptr<!rec_NoOps>>, !u64i) -> !cir.ptr<!cir.ptr<!rec_NoOps>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[LHS_BOUND2_STRIDE_LOAD:.*]] = cir.load %[[LHS_BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps> +// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_BOUND2_STRIDE_LOAD]], %[[ITR1_LOAD]] : (!cir.ptr<!rec_NoOps>, !u64i) -> !cir.ptr<!rec_NoOps> +// CHECK-NEXT: %[[RHS_BOUND2_STRIDE_LOAD:.*]] = cir.load %[[RHS_BOUND2_STRIDE]] : !cir.ptr<!cir.ptr<!rec_NoOps>>, !cir.ptr<!rec_NoOps> +// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_BOUND2_STRIDE_LOAD]], %[[ITR1_LOAD]] : (!cir.ptr<!rec_NoOps>, !u64i) -> !cir.ptr<!rec_NoOps> +// CHECK-NEXT: %[[LHS_GET_I:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_NoOps> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[RHS_GET_I:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_NoOps> -> !cir.ptr<!s32i> +// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[LHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[LHS_CAST_BOOL:.*]] = cir.cast int_to_bool %[[LHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LHS_CAST_BOOL]], true { +// CHECK-NEXT: %[[TRUE:.*]] = cir.const #true +// CHECK-NEXT: cir.yield %[[TRUE]] : !cir.bool +// CHECK-NEXT: }, false { +// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[RHS_GET_I]] : !cir.ptr<!s32i>, !s32i +// CHECK-NEXT: %[[RHS_CAST_BOOL:.*]] = cir.cast int_to_bool %[[RHS_LOAD]] : !s32i -> !cir.bool +// CHECK-NEXT: cir.yield %[[RHS_CAST_BOOL]] : !cir.bool +// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool +// CHECK-NEXT: %[[RES_TO_INT:.*]] = cir.cast bool_to_int %[[TERNARY]] : !cir.bool -> !s32i +// CHECK-NEXT: cir.store{{.*}} %[[RES_TO_INT]], %[[LHS_GET_I]] : !s32i, !cir.ptr<!s32i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: 
cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield %[[LHSARG]] // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr<!cir.ptr<!cir.array<!cir.ptr<!rec_NoOps> x 5>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): // CHECK-NEXT: cir.scope { diff --git a/clang/test/CodeGen/AArch64/sign-return-address.c b/clang/test/CodeGen/AArch64/sign-return-address.c index 11dd683..2b505de 100644 --- a/clang/test/CodeGen/AArch64/sign-return-address.c +++ b/clang/test/CodeGen/AArch64/sign-return-address.c @@ -28,17 +28,17 @@ // NONE-NOT: !"branch-target-enforcement" // ALL-NOT: !"branch-target-enforcement" // PART-NOT: !"branch-target-enforcement" -// BTE: !{i32 8, !"branch-target-enforcement", i32 1} +// BTE: !{i32 8, !"branch-target-enforcement", i32 2} // B-KEY-NOT: !"branch-target-enforcement" // NONE-NOT: !"sign-return-address" -// ALL: !{i32 8, !"sign-return-address", i32 1} -// PART: !{i32 8, !"sign-return-address", i32 1} +// ALL: !{i32 8, !"sign-return-address", i32 2} +// PART: !{i32 8, !"sign-return-address", i32 2} // BTE-NOT: !"sign-return-address" -// B-KEY: !{i32 8, !"sign-return-address", i32 1} +// B-KEY: !{i32 8, !"sign-return-address", i32 2} // NONE-NOT: !"sign-return-address-all" -// ALL: !{i32 8, !"sign-return-address-all", i32 1} +// ALL: !{i32 8, !"sign-return-address-all", i32 2} // PART-NOT: !"sign-return-address-all" // BTE-NOT: !"sign-return-address-all" // B-KEY-NOT: !"sign-return-address-all" @@ -47,6 +47,6 @@ // ALL-NOT: !"sign-return-address-with-bkey" // PART-NOT: !"sign-return-address-with-bkey" // BTE-NOT: !"sign-return-address-with-bkey" -// B-KEY: !{i32 8, !"sign-return-address-with-bkey", i32 1} +// B-KEY: !{i32 8, !"sign-return-address-with-bkey", i32 2} void foo() {} diff --git a/clang/test/CodeGen/arm-branch-protection-attr-2.c b/clang/test/CodeGen/arm-branch-protection-attr-2.c index fad5dc0..5391537 100644 --- a/clang/test/CodeGen/arm-branch-protection-attr-2.c +++ b/clang/test/CodeGen/arm-branch-protection-attr-2.c @@ -23,16 +23,16 @@ // NONE-NOT: !"branch-target-enforcement" // PART-NOT: !"branch-target-enforcement" // ALL-NOT: !"branch-target-enforcement" -// BTE: !{i32 8, !"branch-target-enforcement", i32 1} +// BTE: !{i32 8, !"branch-target-enforcement", i32 2} // NONE-NOT: !"sign-return-address" -// PART: !{i32 8, !"sign-return-address", i32 1} -// ALL: !{i32 8, !"sign-return-address", i32 1} +// PART: !{i32 8, !"sign-return-address", i32 2} +// ALL: !{i32 8, !"sign-return-address", i32 2} // BTE-NOT: !"sign-return-address" // NONE-NOT: !"sign-return-address-all", i32 0} // PART-NOT: !"sign-return-address-all", i32 0} -// 
ALL: !{i32 8, !"sign-return-address-all", i32 1} +// ALL: !{i32 8, !"sign-return-address-all", i32 2} // BTE-NOT: !"sign-return-address-all", i32 0} void foo() {} diff --git a/clang/test/Driver/fsanitize-alloc-token.c b/clang/test/Driver/fsanitize-alloc-token.c index 2964f60..6d8bda1 100644 --- a/clang/test/Driver/fsanitize-alloc-token.c +++ b/clang/test/Driver/fsanitize-alloc-token.c @@ -41,3 +41,14 @@ // CHECK-MAX: "-falloc-token-max=42" // RUN: not %clang --target=x86_64-linux-gnu -fsanitize=alloc-token -falloc-token-max=-1 %s 2>&1 | FileCheck -check-prefix=CHECK-INVALID-MAX %s // CHECK-INVALID-MAX: error: invalid value + +// RUN: %clang --target=x86_64-linux-gnu -Xclang -falloc-token-mode=increment %s -### 2>&1 | FileCheck -check-prefix=CHECK-MODE-INCREMENT %s +// CHECK-MODE-INCREMENT: "-falloc-token-mode=increment" +// RUN: %clang --target=x86_64-linux-gnu -Xclang -falloc-token-mode=random %s -### 2>&1 | FileCheck -check-prefix=CHECK-MODE-RANDOM %s +// CHECK-MODE-RANDOM: "-falloc-token-mode=random" +// RUN: %clang --target=x86_64-linux-gnu -Xclang -falloc-token-mode=typehash %s -### 2>&1 | FileCheck -check-prefix=CHECK-MODE-TYPEHASH %s +// CHECK-MODE-TYPEHASH: "-falloc-token-mode=typehash" +// RUN: %clang --target=x86_64-linux-gnu -Xclang -falloc-token-mode=typehashpointersplit %s -### 2>&1 | FileCheck -check-prefix=CHECK-MODE-TYPEHASHPTRSPLIT %s +// CHECK-MODE-TYPEHASHPTRSPLIT: "-falloc-token-mode=typehashpointersplit" +// RUN: not %clang --target=x86_64-linux-gnu -Xclang -falloc-token-mode=asdf %s 2>&1 | FileCheck -check-prefix=CHECK-INVALID-MODE %s +// CHECK-INVALID-MODE: error: invalid value 'asdf' diff --git a/clang/test/Driver/print-supported-extensions-riscv.c b/clang/test/Driver/print-supported-extensions-riscv.c index 7972b9e..cb81273 100644 --- a/clang/test/Driver/print-supported-extensions-riscv.c +++ b/clang/test/Driver/print-supported-extensions-riscv.c @@ -190,6 +190,11 @@ // CHECK-NEXT: xsfmm64t 0.6 'XSfmm64t' (TE=64 configuration) // CHECK-NEXT: xsfmmbase 0.6 'XSfmmbase' (All non arithmetic instructions for all TEWs and sf.vtzero) // CHECK-NEXT: xsfvcp 1.0 'XSfvcp' (SiFive Custom Vector Coprocessor Interface Instructions) +// CHECK-NEXT: xsfvfbfexp16e 0.5 'XSfvfbfexp16e' (SiFive Vector Floating-Point Exponential Function Instruction, BFloat16) +// CHECK-NEXT: xsfvfexp16e 0.5 'XSfvfexp16e' (SiFive Vector Floating-Point Exponential Function Instruction, Half Precision) +// CHECK-NEXT: xsfvfexp32e 0.5 'XSfvfexp32e' (SiFive Vector Floating-Point Exponential Function Instruction, Single Precision) +// CHECK-NEXT: xsfvfexpa 0.2 'XSfvfexpa' (SiFive Vector Floating-Point Exponential Approximation Instruction) +// CHECK-NEXT: xsfvfexpa64e 0.2 'XSfvfexpa64e' (SiFive Vector Floating-Point Exponential Approximation Instruction with Double-Precision) // CHECK-NEXT: xsfvfnrclipxfqf 1.0 'XSfvfnrclipxfqf' (SiFive FP32-to-int8 Ranged Clip Instructions) // CHECK-NEXT: xsfvfwmaccqqq 1.0 'XSfvfwmaccqqq' (SiFive Matrix Multiply Accumulate Instruction (4-by-4)) // CHECK-NEXT: xsfvqmaccdod 1.0 'XSfvqmaccdod' (SiFive Int8 Matrix Multiplication Instructions (2-by-8 and 8-by-2)) diff --git a/clang/test/Frontend/arm-ignore-branch-protection-option.c b/clang/test/Frontend/arm-ignore-branch-protection-option.c index 99a2acc..45bdb37 100644 --- a/clang/test/Frontend/arm-ignore-branch-protection-option.c +++ b/clang/test/Frontend/arm-ignore-branch-protection-option.c @@ -15,4 +15,4 @@ __attribute__((target("arch=cortex-m0"))) void f() {} // CHECK-NOT: attributes { {{.*}} "branch-target-enforcement" 
/// Check that there are branch protection module attributes despite the warning. -// CHECK: !{i32 8, !"branch-target-enforcement", i32 1} +// CHECK: !{i32 8, !"branch-target-enforcement", i32 2} diff --git a/clang/test/Preprocessor/riscv-target-features-sifive.c b/clang/test/Preprocessor/riscv-target-features-sifive.c index 1c49b55..5002f62 100644 --- a/clang/test/Preprocessor/riscv-target-features-sifive.c +++ b/clang/test/Preprocessor/riscv-target-features-sifive.c @@ -5,6 +5,11 @@ // CHECK-NOT: __riscv_xsfcease {{.*$}} // CHECK-NOT: __riscv_xsfvcp {{.*$}} +// CHECK-NOT: __riscv_xsfvfbfexp16e {{.*$}} +// CHECK-NOT: __riscv_xsfvfexp16e {{.*$}} +// CHECK-NOT: __riscv_xsfvfexp32e {{.*$}} +// CHECK-NOT: __riscv_xsfvfexpa {{.*$}} +// CHECK-NOT: __riscv_xsfvfexpa64e {{.*$}} // CHECK-NOT: __riscv_xsfvfnrclipxfqf {{.*$}} // CHECK-NOT: __riscv_xsfvfwmaccqqq {{.*$}} // CHECK-NOT: __riscv_xsfqmaccdod {{.*$}} @@ -39,6 +44,46 @@ // CHECK-XSFVCP-EXT: __riscv_xsfvcp 1000000{{$}} // RUN: %clang --target=riscv32-unknown-linux-gnu \ +// RUN: -march=rv32ixsfvfbfexp16e_zvfbfmin -E -dM %s \ +// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFBFEXP16E-EXT %s +// RUN: %clang --target=riscv64-unknown-linux-gnu \ +// RUN: -march=rv64ixsfvfbfexp16e_zvfbfmin -E -dM %s \ +// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFBFEXP16E-EXT %s +// CHECK-XSFVFBFEXP16E-EXT: __riscv_xsfvfbfexp16e 5000{{$}} + +// RUN: %clang --target=riscv32-unknown-linux-gnu \ +// RUN: -march=rv32ixsfvfexp16e -E -dM %s \ +// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXP16E-EXT %s +// RUN: %clang --target=riscv64-unknown-linux-gnu \ +// RUN: -march=rv64ixsfvfexp16e -E -dM %s \ +// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXP16E-EXT %s +// CHECK-XSFVFEXP16E-EXT: __riscv_xsfvfexp16e 5000{{$}} + +// RUN: %clang --target=riscv32-unknown-linux-gnu \ +// RUN: -march=rv32ixsfvfexp32e -E -dM %s \ +// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXP32E-EXT %s +// RUN: %clang --target=riscv64-unknown-linux-gnu \ +// RUN: -march=rv64ixsfvfexp32e -E -dM %s \ +// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXP32E-EXT %s +// CHECK-XSFVFEXP32E-EXT: __riscv_xsfvfexp32e 5000{{$}} + +// RUN: %clang --target=riscv32-unknown-linux-gnu \ +// RUN: -march=rv32ixsfvfexpa -E -dM %s \ +// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXPA-EXT %s +// RUN: %clang --target=riscv64-unknown-linux-gnu \ +// RUN: -march=rv64ixsfvfexpa -E -dM %s \ +// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXPA-EXT %s +// CHECK-XSFVFEXPA-EXT: __riscv_xsfvfexpa 2000{{$}} + +// RUN: %clang --target=riscv32-unknown-linux-gnu \ +// RUN: -march=rv32ixsfvfexpa64e -E -dM %s \ +// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXPA64E-EXT %s +// RUN: %clang --target=riscv64-unknown-linux-gnu \ +// RUN: -march=rv64ixsfvfexpa64e -E -dM %s \ +// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFEXPA64E-EXT %s +// CHECK-XSFVFEXPA64E-EXT: __riscv_xsfvfexpa64e 2000{{$}} + +// RUN: %clang --target=riscv32-unknown-linux-gnu \ // RUN: -march=rv32ixsfvfnrclipxfqf -E -dM %s \ // RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFNRCLIPXFQF-EXT %s // RUN: %clang --target=riscv64-unknown-linux-gnu \ diff --git a/clang/test/lit.cfg.py b/clang/test/lit.cfg.py index 29088ef..52b275c 100644 --- a/clang/test/lit.cfg.py +++ b/clang/test/lit.cfg.py @@ -18,11 +18,22 @@ from lit.llvm.subst import FindTool # name: The name of this test suite. config.name = "Clang" +# TODO: Consolidate the logic for turning on the internal shell by default for all LLVM test suites. 
+# See https://github.com/llvm/llvm-project/issues/106636 for more details. +# +# We prefer the lit internal shell which provides a better user experience on failures +# and is faster unless the user explicitly disables it with LIT_USE_INTERNAL_SHELL=0 +# env var. +use_lit_shell = True +lit_shell_env = os.environ.get("LIT_USE_INTERNAL_SHELL") +if lit_shell_env: + use_lit_shell = lit.util.pythonize_bool(lit_shell_env) + # testFormat: The test format to use to interpret tests. # # For now we require '&&' between commands, until they get globally killed and # the test runner updated. -config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell) +config.test_format = lit.formats.ShTest(execute_external=not use_lit_shell) # suffixes: A list of file extensions to treat as test files. config.suffixes = [ diff --git a/clang/tools/clang-fuzzer/Dockerfile b/clang/tools/clang-fuzzer/Dockerfile deleted file mode 100644 index 1ddf829..0000000 --- a/clang/tools/clang-fuzzer/Dockerfile +++ /dev/null @@ -1,41 +0,0 @@ -#===- llvm/tools/clang/tools/clang-fuzzer ---------------------------------===// -# -# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -# See https://llvm.org/LICENSE.txt for license information. -# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -# -#===----------------------------------------------------------------------===// -# Produces an image that builds clang-proto-fuzzer -FROM ubuntu:16.04 -RUN apt-get update -y -RUN apt-get install -y autoconf automake libtool curl make g++ unzip wget git \ - binutils liblzma-dev libz-dev python-all cmake ninja-build subversion \ - pkg-config docbook2x - -WORKDIR /root - -# Get protobuf -RUN wget -qO- https://github.com/google/protobuf/releases/download/v3.3.0/protobuf-cpp-3.3.0.tar.gz | tar zxf - -RUN cd protobuf-3.3.0 && ./autogen.sh && ./configure && make -j $(nproc) && make check -j $(nproc) && make install && ldconfig -# Get LLVM -RUN svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm -RUN cd llvm/tools && svn co http://llvm.org/svn/llvm-project/cfe/trunk clang -r $(cd ../ && svn info | grep Revision | awk '{print $2}') -RUN cd llvm/projects && svn co http://llvm.org/svn/llvm-project/compiler-rt/trunk compiler-rt -r $(cd ../ && svn info | grep Revision | awk '{print $2}') -# Build plain LLVM (stage 0) -RUN mkdir build0 && cd build0 && cmake -GNinja -DCMAKE_BUILD_TYPE=Release ../llvm && ninja -# Configure instrumented LLVM (stage 1) -RUN mkdir build1 && cd build1 && cmake -GNinja -DCMAKE_BUILD_TYPE=Release ../llvm \ - -DLLVM_ENABLE_ASSERTIONS=ON \ - -DCMAKE_C_COMPILER=`pwd`/../build0/bin/clang \ - -DCMAKE_CXX_COMPILER=`pwd`/../build0/bin/clang++ \ - -DLLVM_USE_SANITIZE_COVERAGE=YES \ - -DLLVM_USE_SANITIZER=Address -DCLANG_ENABLE_PROTO_FUZZER=ON -# Build the fuzzers -RUN cd build1 && ninja clang-fuzzer -RUN cd build1 && ninja clang-objc-fuzzer -RUN cd build1 && ninja clang-proto-fuzzer -RUN cd build1 && ninja clang-proto-to-cxx -RUN cd build1 && ninja clang-loop-proto-to-cxx -RUN cd build1 && ninja clang-loop-proto-to-llvm -RUN cd build1 && ninja clang-loop-proto-fuzzer -RUN cd build1 && ninja clang-llvm-proto-fuzzer diff --git a/clang/tools/clang-fuzzer/README.txt b/clang/tools/clang-fuzzer/README.txt index eec4a9e..218c544 100644 --- a/clang/tools/clang-fuzzer/README.txt +++ b/clang/tools/clang-fuzzer/README.txt @@ -99,9 +99,6 @@ Example: -DCLANG_ENABLE_PROTO_FUZZER=ON ninja clang-proto-fuzzer clang-proto-to-cxx -This directory also contains a Dockerfile which sets up all required 
-dependencies and builds the fuzzers. - ============================ Running clang-proto-fuzzer ============================ diff --git a/clang/unittests/Sema/HeuristicResolverTest.cpp b/clang/unittests/Sema/HeuristicResolverTest.cpp index a00632f..c592e74 100644 --- a/clang/unittests/Sema/HeuristicResolverTest.cpp +++ b/clang/unittests/Sema/HeuristicResolverTest.cpp @@ -524,6 +524,28 @@ TEST(HeuristicResolver, MemberExpr_HangIssue126536) { cxxDependentScopeMemberExpr(hasMemberName("foo")).bind("input")); } +TEST(HeuristicResolver, MemberExpr_HangOnLongCallChain) { + const size_t CallChainLength = 50; + std::string Code = R"cpp( + template <typename T> + void foo(T t) { + t + )cpp"; + for (size_t I = 0; I < CallChainLength; ++I) + Code.append(".method()\n"); + Code.append(R"cpp( + .lastMethod(); + } + )cpp"); + // Test that resolution of a name whose base is a long call chain + // does not hang. Note that the hang for which this is a regression + // test is finite (exponential runtime in the length of the chain), + // so a "failure" here manifests as abnormally long runtime. + expectResolution( + Code, &HeuristicResolver::resolveMemberExpr, + cxxDependentScopeMemberExpr(hasMemberName("lastMethod")).bind("input")); +} + TEST(HeuristicResolver, MemberExpr_DefaultTemplateArgument) { std::string Code = R"cpp( struct Default { diff --git a/compiler-rt/lib/builtins/assembly.h b/compiler-rt/lib/builtins/assembly.h index 79a45d91..d28f73f 100644 --- a/compiler-rt/lib/builtins/assembly.h +++ b/compiler-rt/lib/builtins/assembly.h @@ -337,7 +337,7 @@ #endif #endif -#if defined(__i386__) || defined(__amd64__) +#if defined(__ASSEMBLER__) && (defined(__i386__) || defined(__amd64__)) .att_syntax #endif diff --git a/flang/include/flang/Optimizer/Dialect/FIROps.td b/flang/include/flang/Optimizer/Dialect/FIROps.td index fc6eedc..86502c6 100644 --- a/flang/include/flang/Optimizer/Dialect/FIROps.td +++ b/flang/include/flang/Optimizer/Dialect/FIROps.td @@ -1249,6 +1249,41 @@ def fir_IsAssumedSizeOp : fir_SimpleOp<"is_assumed_size", [NoMemoryEffect]> { let results = (outs BoolLike); } +def fir_AssumedSizeExtentOp : fir_SimpleOneResultOp<"assumed_size_extent", [NoMemoryEffect]> { + let summary = "get the assumed-size last extent sentinel"; + + let description = [{ + Returns the special extent value representing the last dimension of an + assumed-size array. This is used to model the semantics in FIR without + directly materializing the sentinel value. The concrete encoding is + introduced during FIR to LLVM lowering. + + ``` + %e = fir.assumed_size_extent : index + ``` + }]; + + let results = (outs Index); + let assemblyFormat = "attr-dict `:` type(results)"; +} + +def fir_IsAssumedSizeExtentOp : fir_SimpleOp<"is_assumed_size_extent", [NoMemoryEffect]> { + let summary = "is value the assumed-size last extent sentinel"; + + let description = [{ + Returns true iff the given integer equals the assumed-size extent sentinel. 
+ + ``` + %t = fir.is_assumed_size_extent %v : (index) -> i1 + %c = fir.is_assumed_size_extent %x : (i32) -> i1 + ``` + }]; + + let arguments = (ins AnyIntegerLike:$val); + let results = (outs BoolLike); + let hasCanonicalizer = 1; +} + def fir_BoxIsPtrOp : fir_SimpleOp<"box_isptr", [NoMemoryEffect]> { let summary = "is the boxed value a POINTER?"; diff --git a/flang/include/flang/Semantics/scope.h b/flang/include/flang/Semantics/scope.h index 3195892..ecffdb4 100644 --- a/flang/include/flang/Semantics/scope.h +++ b/flang/include/flang/Semantics/scope.h @@ -86,6 +86,13 @@ public: CHECK(parent_ != this); return *parent_; } + + mapType &commonBlocks() { return commonBlocks_; } + const mapType &commonBlocks() const { return commonBlocks_; } + + mapType &commonBlockUses() { return commonBlockUses_; } + const mapType &commonBlockUses() const { return commonBlockUses_; } + Kind kind() const { return kind_; } bool IsGlobal() const { return kind_ == Kind::Global; } bool IsIntrinsicModules() const { return kind_ == Kind::IntrinsicModules; } @@ -186,10 +193,19 @@ public: // Cray pointers are saved as map of pointee name -> pointer symbol const mapType &crayPointers() const { return crayPointers_; } void add_crayPointer(const SourceName &, Symbol &); - mapType &commonBlocks() { return commonBlocks_; } - const mapType &commonBlocks() const { return commonBlocks_; } Symbol &MakeCommonBlock(SourceName, SourceName location); - Symbol *FindCommonBlock(const SourceName &) const; + bool AddCommonBlockUse( + const SourceName &name, Attrs attrs, Symbol &cbUltimate); + + // Find COMMON block that is declared in the current scope + Symbol *FindCommonBlock(const SourceName &name) const; + + // Find USE-associated COMMON block in the current scope + Symbol *FindCommonBlockUse(const SourceName &name) const; + + // Find COMMON block in current and surrounding scopes, follow USE + // associations + Symbol *FindCommonBlockInVisibleScopes(const SourceName &) const; /// Make a Symbol but don't add it to the scope. template <typename D> @@ -283,6 +299,7 @@ private: std::list<Scope> children_; mapType symbols_; mapType commonBlocks_; + mapType commonBlockUses_; // USE-assocated COMMON blocks std::list<EquivalenceSet> equivalenceSets_; mapType crayPointers_; std::map<SourceName, common::Reference<Scope>> submodules_; diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp index 3b711cc..238a623 100644 --- a/flang/lib/Lower/Bridge.cpp +++ b/flang/lib/Lower/Bridge.cpp @@ -4010,8 +4010,8 @@ private: // parameters and dynamic type. The selector cannot be a // POINTER/ALLOCATBLE as per F'2023 C1160. fir::ExtendedValue newExv; - llvm::SmallVector assumeSizeExtents{ - builder->createMinusOneInteger(loc, builder->getIndexType())}; + llvm::SmallVector<mlir::Value> assumeSizeExtents{ + fir::AssumedSizeExtentOp::create(*builder, loc)}; mlir::Value baseAddr = hlfir::genVariableRawAddress(loc, *builder, selector); const bool isVolatile = fir::isa_volatile_type(selector.getType()); diff --git a/flang/lib/Lower/ConvertVariable.cpp b/flang/lib/Lower/ConvertVariable.cpp index 00ec1b5..2517ab3 100644 --- a/flang/lib/Lower/ConvertVariable.cpp +++ b/flang/lib/Lower/ConvertVariable.cpp @@ -1711,7 +1711,7 @@ static void lowerExplicitLowerBounds( /// CFI_desc_t requirements in 18.5.3 point 5.). 
static mlir::Value getAssumedSizeExtent(mlir::Location loc, fir::FirOpBuilder &builder) { - return builder.createMinusOneInteger(loc, builder.getIndexType()); + return fir::AssumedSizeExtentOp::create(builder, loc); } /// Lower explicit extents into \p result if this is an explicit-shape or diff --git a/flang/lib/Lower/OpenACC.cpp b/flang/lib/Lower/OpenACC.cpp index af4f420..1fc59c7 100644 --- a/flang/lib/Lower/OpenACC.cpp +++ b/flang/lib/Lower/OpenACC.cpp @@ -2366,6 +2366,23 @@ static void processDoLoopBounds( } } +static void remapCommonBlockMember( + Fortran::lower::AbstractConverter &converter, mlir::Location loc, + const Fortran::semantics::Symbol &member, + mlir::Value newCommonBlockBaseAddress, + const Fortran::semantics::Symbol &commonBlockSymbol, + llvm::SmallPtrSetImpl<const Fortran::semantics::Symbol *> &seenSymbols) { + if (seenSymbols.contains(&member)) + return; + mlir::Value accMemberValue = Fortran::lower::genCommonBlockMember( + converter, loc, member, newCommonBlockBaseAddress, + commonBlockSymbol.size()); + fir::ExtendedValue hostExv = converter.getSymbolExtendedValue(member); + fir::ExtendedValue accExv = fir::substBase(hostExv, accMemberValue); + converter.bindSymbol(member, accExv); + seenSymbols.insert(&member); +} + /// Remap symbols that appeared in OpenACC data clauses to use the results of /// the corresponding data operations. This allows isolating symbol accesses /// inside the OpenACC region from accesses in the host and other regions while @@ -2391,14 +2408,39 @@ static void remapDataOperandSymbols( builder.setInsertionPointToStart(®ionOp.getRegion().front()); llvm::SmallPtrSet<const Fortran::semantics::Symbol *, 8> seenSymbols; mlir::IRMapping mapper; + mlir::Location loc = regionOp.getLoc(); for (auto [value, symbol] : dataOperandSymbolPairs) { - - // If A symbol appears on several data clause, just map it to the first + // If a symbol appears on several data clause, just map it to the first // result (all data operations results for a symbol are pointing same // memory, so it does not matter which one is used). if (seenSymbols.contains(&symbol.get())) continue; seenSymbols.insert(&symbol.get()); + // When a common block appears in a directive, remap its members. + // Note: this will instantiate all common block members even if they are not + // used inside the region. If hlfir.declare DCE is not made possible, this + // could be improved to reduce IR noise. + if (const auto *commonBlock = symbol->template detailsIf< + Fortran::semantics::CommonBlockDetails>()) { + const Fortran::semantics::Scope &commonScope = symbol->owner(); + if (commonScope.equivalenceSets().empty()) { + for (auto member : commonBlock->objects()) + remapCommonBlockMember(converter, loc, *member, value, *symbol, + seenSymbols); + } else { + // Objects equivalenced with common block members still belong to the + // common block storage even if they are not part of the common block + // declaration. The easiest and most robust way to find all symbols + // belonging to the common block is to loop through the scope symbols + // and check if they belong to the common. 
+ for (const auto &scopeSymbol : commonScope) + if (Fortran::semantics::FindCommonBlockContaining( + *scopeSymbol.second) == &symbol.get()) + remapCommonBlockMember(converter, loc, *scopeSymbol.second, value, + *symbol, seenSymbols); + } + continue; + } std::optional<fir::FortranVariableOpInterface> hostDef = symbolMap.lookupVariableDefinition(symbol); assert(hostDef.has_value() && llvm::isa<hlfir::DeclareOp>(*hostDef) && @@ -2415,10 +2457,8 @@ static void remapDataOperandSymbols( "box type mismatch between compute region variable and " "hlfir.declare input unexpected"); if (Fortran::semantics::IsOptional(symbol)) - TODO(regionOp.getLoc(), - "remapping OPTIONAL symbol in OpenACC compute region"); - auto rawValue = - fir::BoxAddrOp::create(builder, regionOp.getLoc(), hostType, value); + TODO(loc, "remapping OPTIONAL symbol in OpenACC compute region"); + auto rawValue = fir::BoxAddrOp::create(builder, loc, hostType, value); mapper.map(hostInput, rawValue); } else { assert(!llvm::isa<fir::BaseBoxType>(hostType) && @@ -2430,8 +2470,7 @@ static void remapDataOperandSymbols( assert(fir::isa_ref_type(hostType) && fir::isa_ref_type(computeType) && "compute region variable and host variable should both be raw " "addresses"); - mlir::Value cast = - builder.createConvert(regionOp.getLoc(), hostType, value); + mlir::Value cast = builder.createConvert(loc, hostType, value); mapper.map(hostInput, cast); } if (mlir::Value dummyScope = hostDeclare.getDummyScope()) { diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp index 70bb43a2..e71f4e3 100644 --- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp +++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp @@ -749,6 +749,44 @@ struct VolatileCastOpConversion } }; +/// Lower `fir.assumed_size_extent` to constant -1 of index type. +struct AssumedSizeExtentOpConversion + : public fir::FIROpConversion<fir::AssumedSizeExtentOp> { + using FIROpConversion::FIROpConversion; + + llvm::LogicalResult + matchAndRewrite(fir::AssumedSizeExtentOp op, OpAdaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + mlir::Location loc = op.getLoc(); + mlir::Type ity = lowerTy().indexType(); + auto cst = fir::genConstantIndex(loc, ity, rewriter, -1); + rewriter.replaceOp(op, cst.getResult()); + return mlir::success(); + } +}; + +/// Lower `fir.is_assumed_size_extent` to integer equality with -1. +struct IsAssumedSizeExtentOpConversion + : public fir::FIROpConversion<fir::IsAssumedSizeExtentOp> { + using FIROpConversion::FIROpConversion; + + llvm::LogicalResult + matchAndRewrite(fir::IsAssumedSizeExtentOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + mlir::Location loc = op.getLoc(); + mlir::Value val = adaptor.getVal(); + mlir::Type valTy = val.getType(); + // Create constant -1 of the operand type. 
+ auto negOneAttr = rewriter.getIntegerAttr(valTy, -1); + auto negOne = + mlir::LLVM::ConstantOp::create(rewriter, loc, valTy, negOneAttr); + auto cmp = mlir::LLVM::ICmpOp::create( + rewriter, loc, mlir::LLVM::ICmpPredicate::eq, val, negOne); + rewriter.replaceOp(op, cmp.getResult()); + return mlir::success(); + } +}; + /// convert value of from-type to value of to-type struct ConvertOpConversion : public fir::FIROpConversion<fir::ConvertOp> { using FIROpConversion::FIROpConversion; @@ -4360,6 +4398,7 @@ void fir::populateFIRToLLVMConversionPatterns( AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion, BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion, BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion, + AssumedSizeExtentOpConversion, IsAssumedSizeExtentOpConversion, BoxOffsetOpConversion, BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeCodeOpConversion, BoxTypeDescOpConversion, CallOpConversion, CmpcOpConversion, VolatileCastOpConversion, ConvertOpConversion, diff --git a/flang/lib/Optimizer/Dialect/FIROps.cpp b/flang/lib/Optimizer/Dialect/FIROps.cpp index 1712af1..d0164f3 100644 --- a/flang/lib/Optimizer/Dialect/FIROps.cpp +++ b/flang/lib/Optimizer/Dialect/FIROps.cpp @@ -5143,6 +5143,34 @@ void fir::BoxTotalElementsOp::getCanonicalizationPatterns( } //===----------------------------------------------------------------------===// +// IsAssumedSizeExtentOp and AssumedSizeExtentOp +//===----------------------------------------------------------------------===// + +namespace { +struct FoldIsAssumedSizeExtentOnCtor + : public mlir::OpRewritePattern<fir::IsAssumedSizeExtentOp> { + using mlir::OpRewritePattern<fir::IsAssumedSizeExtentOp>::OpRewritePattern; + mlir::LogicalResult + matchAndRewrite(fir::IsAssumedSizeExtentOp op, + mlir::PatternRewriter &rewriter) const override { + if (llvm::isa_and_nonnull<fir::AssumedSizeExtentOp>( + op.getVal().getDefiningOp())) { + mlir::Type i1 = rewriter.getI1Type(); + rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>( + op, i1, rewriter.getIntegerAttr(i1, 1)); + return mlir::success(); + } + return mlir::failure(); + } +}; +} // namespace + +void fir::IsAssumedSizeExtentOp::getCanonicalizationPatterns( + mlir::RewritePatternSet &patterns, mlir::MLIRContext *context) { + patterns.add<FoldIsAssumedSizeExtentOnCtor>(context); +} + +//===----------------------------------------------------------------------===// // LocalitySpecifierOp //===----------------------------------------------------------------------===// diff --git a/flang/lib/Optimizer/Transforms/ArrayValueCopy.cpp b/flang/lib/Optimizer/Transforms/ArrayValueCopy.cpp index ed9a2ae..5bf783d 100644 --- a/flang/lib/Optimizer/Transforms/ArrayValueCopy.cpp +++ b/flang/lib/Optimizer/Transforms/ArrayValueCopy.cpp @@ -832,8 +832,8 @@ static mlir::Type getEleTy(mlir::Type ty) { static bool isAssumedSize(llvm::SmallVectorImpl<mlir::Value> &extents) { if (extents.empty()) return false; - auto cstLen = fir::getIntIfConstant(extents.back()); - return cstLen.has_value() && *cstLen == -1; + return llvm::isa_and_nonnull<fir::AssumedSizeExtentOp>( + extents.back().getDefiningOp()); } // Extract extents from the ShapeOp/ShapeShiftOp into the result vector. 
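Taken together, the FIR changes above replace the magic constant -1 extent for assumed-size arrays with a dedicated op pair: `fir.assumed_size_extent` materializes the trailing extent, and `fir.is_assumed_size_extent` queries it, folding to true during canonicalization when its operand comes straight from `fir.assumed_size_extent` and otherwise lowering to an integer compare against -1. A minimal sketch of the resulting IR for an assumed-size dummy such as `real :: x(10, *)`, following the forms exercised by the new tests below (SSA value names are illustrative only):

    %c10 = arith.constant 10 : index
    // The trailing extent is no longer `arith.constant -1 : index`; the op
    // carries the assumed-size intent explicitly.
    %e = fir.assumed_size_extent : index
    %shape = fir.shape %c10, %e : (index, index) -> !fir.shape<2>
    // Folds to `arith.constant true` at canonicalization; otherwise codegen
    // lowers it to an i64 equality test against -1.
    %is = fir.is_assumed_size_extent %e : (index) -> i1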
diff --git a/flang/lib/Semantics/resolve-directives.cpp b/flang/lib/Semantics/resolve-directives.cpp index b0c36ec..c410bd4 100644 --- a/flang/lib/Semantics/resolve-directives.cpp +++ b/flang/lib/Semantics/resolve-directives.cpp @@ -1723,17 +1723,12 @@ void AccAttributeVisitor::Post(const parser::Name &name) { Symbol *AccAttributeVisitor::ResolveAccCommonBlockName( const parser::Name *name) { - if (auto *prev{name - ? GetContext().scope.parent().FindCommonBlock(name->source) - : nullptr}) { - name->symbol = prev; - return prev; - } - // Check if the Common Block is declared in the current scope - if (auto *commonBlockSymbol{ - name ? GetContext().scope.FindCommonBlock(name->source) : nullptr}) { - name->symbol = commonBlockSymbol; - return commonBlockSymbol; + if (name) { + if (Symbol * + cb{GetContext().scope.FindCommonBlockInVisibleScopes(name->source)}) { + name->symbol = cb; + return cb; + } } return nullptr; } @@ -1783,8 +1778,8 @@ void AccAttributeVisitor::ResolveAccObject( } } else { context_.Say(name.source, - "COMMON block must be declared in the same scoping unit " - "in which the OpenACC directive or clause appears"_err_en_US); + "Could not find COMMON block '%s' used in OpenACC directive"_err_en_US, + name.ToString()); } }, }, diff --git a/flang/lib/Semantics/resolve-names.cpp b/flang/lib/Semantics/resolve-names.cpp index 88cc446..db75437 100644 --- a/flang/lib/Semantics/resolve-names.cpp +++ b/flang/lib/Semantics/resolve-names.cpp @@ -3645,6 +3645,20 @@ void ModuleVisitor::Post(const parser::UseStmt &x) { } } } + // Go through the list of COMMON block symbols in the module scope and add + // their USE association to the current scope's USE-associated COMMON blocks. + for (const auto &[name, symbol] : useModuleScope_->commonBlocks()) { + if (!currScope().FindCommonBlockInVisibleScopes(name)) { + currScope().AddCommonBlockUse( + name, symbol->attrs(), symbol->GetUltimate()); + } + } + // Go through the list of USE-associated COMMON block symbols in the module + // scope and add USE associations to their ultimate symbols to the current + // scope's USE-associated COMMON blocks. + for (const auto &[name, symbol] : useModuleScope_->commonBlockUses()) { + currScope().AddCommonBlockUse(name, symbol->attrs(), symbol->GetUltimate()); + } useModuleScope_ = nullptr; } diff --git a/flang/lib/Semantics/scope.cpp b/flang/lib/Semantics/scope.cpp index 4af371f..ab75d4c 100644 --- a/flang/lib/Semantics/scope.cpp +++ b/flang/lib/Semantics/scope.cpp @@ -144,9 +144,8 @@ void Scope::add_crayPointer(const SourceName &name, Symbol &pointer) { } Symbol &Scope::MakeCommonBlock(SourceName name, SourceName location) { - const auto it{commonBlocks_.find(name)}; - if (it != commonBlocks_.end()) { - return *it->second; + if (auto *cb{FindCommonBlock(name)}) { + return *cb; } else { Symbol &symbol{MakeSymbol( name, Attrs{}, CommonBlockDetails{name.empty() ? location : name})}; @@ -154,9 +153,25 @@ Symbol &Scope::MakeCommonBlock(SourceName name, SourceName location) { return symbol; } } -Symbol *Scope::FindCommonBlock(const SourceName &name) const { - const auto it{commonBlocks_.find(name)}; - return it != commonBlocks_.end() ? &*it->second : nullptr; + +Symbol *Scope::FindCommonBlockInVisibleScopes(const SourceName &name) const { + if (Symbol * cb{FindCommonBlock(name)}) { + return cb; + } else if (Symbol * cb{FindCommonBlockUse(name)}) { + return &cb->GetUltimate(); + } else if (IsSubmodule()) { + if (const Scope *parent{ + symbol_ ? 
symbol_->get<ModuleDetails>().parent() : nullptr}) { + if (auto *cb{parent->FindCommonBlockInVisibleScopes(name)}) { + return cb; + } + } + } else if (!IsTopLevel() && parent_) { + if (auto *cb{parent_->FindCommonBlockInVisibleScopes(name)}) { + return cb; + } + } + return nullptr; } Scope *Scope::FindSubmodule(const SourceName &name) const { @@ -167,6 +182,31 @@ Scope *Scope::FindSubmodule(const SourceName &name) const { return &*it->second; } } + +bool Scope::AddCommonBlockUse( + const SourceName &name, Attrs attrs, Symbol &cbUltimate) { + CHECK(cbUltimate.has<CommonBlockDetails>()); + // Make a symbol, but don't add it to the Scope, since it needs to + // be added to the USE-associated COMMON blocks + Symbol &useCB{MakeSymbol(name, attrs, UseDetails{name, cbUltimate})}; + return commonBlockUses_.emplace(name, useCB).second; +} + +Symbol *Scope::FindCommonBlock(const SourceName &name) const { + if (const auto it{commonBlocks_.find(name)}; it != commonBlocks_.end()) { + return &*it->second; + } + return nullptr; +} + +Symbol *Scope::FindCommonBlockUse(const SourceName &name) const { + if (const auto it{commonBlockUses_.find(name)}; + it != commonBlockUses_.end()) { + return &*it->second; + } + return nullptr; +} + bool Scope::AddSubmodule(const SourceName &name, Scope &submodule) { return submodules_.emplace(name, submodule).second; } diff --git a/flang/test/Fir/assumed-size-ops-codegen.fir b/flang/test/Fir/assumed-size-ops-codegen.fir new file mode 100644 index 0000000..54e9b3c --- /dev/null +++ b/flang/test/Fir/assumed-size-ops-codegen.fir @@ -0,0 +1,19 @@ +// RUN: fir-opt --fir-to-llvm-ir="target=x86_64-unknown-linux-gnu" %s | FileCheck %s + +// CHECK-LABEL: @assumed_size_extent( +// CHECK: %[[CNEG1:.*]] = llvm.mlir.constant(-1 : i64) +// CHECK: llvm.return %[[CNEG1]] : i64 +func.func @assumed_size_extent() -> index { + %e = fir.assumed_size_extent : index + return %e : index +} + +// CHECK-LABEL: @is_assumed_size_extent( +// CHECK: %[[NEG1:.*]] = llvm.mlir.constant(-1 : i64) +// CHECK: %[[CMP:.*]] = llvm.icmp "eq" +// CHECK: llvm.return %[[CMP]] : i1 +func.func @is_assumed_size_extent(%x: index) -> i1 { + %c = fir.is_assumed_size_extent %x : (index) -> i1 + return %c : i1 +} + diff --git a/flang/test/Fir/assumed-size-ops-folding.fir b/flang/test/Fir/assumed-size-ops-folding.fir new file mode 100644 index 0000000..9fd5fab --- /dev/null +++ b/flang/test/Fir/assumed-size-ops-folding.fir @@ -0,0 +1,13 @@ +// RUN: fir-opt --canonicalize %s | FileCheck %s + +// Verify: fir.is_assumed_size_extent(fir.assumed_size_extent) folds to i1 true. 
+ +// CHECK-LABEL: func.func @fold( +func.func @fold() -> i1 { + %e = fir.assumed_size_extent : index + // CHECK: %[[C:.*]] = arith.constant true + %t = fir.is_assumed_size_extent %e : (index) -> i1 + return %t : i1 +} + + diff --git a/flang/test/Fir/assumed-size-ops-roundtrip.fir b/flang/test/Fir/assumed-size-ops-roundtrip.fir new file mode 100644 index 0000000..c3c1883 --- /dev/null +++ b/flang/test/Fir/assumed-size-ops-roundtrip.fir @@ -0,0 +1,13 @@ +// RUN: fir-opt %s | fir-opt | FileCheck %s + +func.func @roundtrip() { + // CHECK: %[[E:.*]] = fir.assumed_size_extent : index + %e = fir.assumed_size_extent : index + + // CHECK: %[[T:.*]] = fir.is_assumed_size_extent %[[E]] : (index) -> i1 + %t = fir.is_assumed_size_extent %e : (index) -> i1 + + return +} + + diff --git a/flang/test/HLFIR/assumed-type-actual-args.f90 b/flang/test/HLFIR/assumed-type-actual-args.f90 index 42e9ed2..aaac98b 100644 --- a/flang/test/HLFIR/assumed-type-actual-args.f90 +++ b/flang/test/HLFIR/assumed-type-actual-args.f90 @@ -113,7 +113,7 @@ end subroutine ! CHECK-LABEL: func.func @_QPtest2( ! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<?xnone>> {fir.bindc_name = "x"}) { ! CHECK: %[[DSCOPE:.*]] = fir.dummy_scope : !fir.dscope -! CHECK: %[[VAL_1:.*]] = arith.constant -1 : index +! CHECK: %[[VAL_1:.*]] = fir.assumed_size_extent : index ! CHECK: %[[VAL_2:.*]] = fir.shape %[[VAL_1]] : (index) -> !fir.shape<1> ! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_2]]) dummy_scope %[[DSCOPE]] {uniq_name = "_QFtest2Ex"} : (!fir.ref<!fir.array<?xnone>>, !fir.shape<1>, !fir.dscope) -> (!fir.box<!fir.array<?xnone>>, !fir.ref<!fir.array<?xnone>>) ! CHECK: fir.call @_QPs2(%[[VAL_3]]#1) fastmath<contract> : (!fir.ref<!fir.array<?xnone>>) -> () diff --git a/flang/test/Lower/HLFIR/assumed-rank-iface.f90 b/flang/test/Lower/HLFIR/assumed-rank-iface.f90 index 9ecbb7c..ffb36fa 100644 --- a/flang/test/Lower/HLFIR/assumed-rank-iface.f90 +++ b/flang/test/Lower/HLFIR/assumed-rank-iface.f90 @@ -145,7 +145,7 @@ end subroutine ! CHECK: %[[VAL_3:.*]] = arith.constant 0 : index ! CHECK: %[[VAL_4:.*]] = arith.cmpi sgt, %[[VAL_2]], %[[VAL_3]] : index ! CHECK: %[[VAL_5:.*]] = arith.select %[[VAL_4]], %[[VAL_2]], %[[VAL_3]] : index -! CHECK: %[[VAL_6:.*]] = arith.constant -1 : index +! CHECK: %[[VAL_6:.*]] = fir.assumed_size_extent : index ! CHECK: %[[VAL_7:.*]] = fir.shape %[[VAL_5]], %[[VAL_6]] : (index, index) -> !fir.shape<2> ! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_7]]) dummy_scope %{{[0-9]+}} {uniq_name = "_QFint_r2_assumed_size_to_assumed_rankEx"} : (!fir.ref<!fir.array<10x?xi32>>, !fir.shape<2>, !fir.dscope) -> (!fir.box<!fir.array<10x?xi32>>, !fir.ref<!fir.array<10x?xi32>>) ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]]#0 : (!fir.box<!fir.array<10x?xi32>>) -> !fir.box<!fir.array<*:i32>> diff --git a/flang/test/Lower/HLFIR/select-rank.f90 b/flang/test/Lower/HLFIR/select-rank.f90 index 0f80c72..f1f968de 100644 --- a/flang/test/Lower/HLFIR/select-rank.f90 +++ b/flang/test/Lower/HLFIR/select-rank.f90 @@ -371,7 +371,7 @@ end subroutine ! CHECK: fir.call @_QPr1(%[[VAL_11]]#0) fastmath<contract> : (!fir.box<!fir.array<?xf32>>) -> () ! CHECK: cf.br ^bb6 ! CHECK: ^bb5: -! CHECK: %[[VAL_12:.*]] = arith.constant -1 : index +! CHECK: %[[VAL_12:.*]] = fir.assumed_size_extent : index ! CHECK: %[[VAL_13:.*]] = fir.box_addr %[[VAL_2]]#1 : (!fir.box<!fir.array<*:f32>>) -> !fir.ref<!fir.array<*:f32>> ! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_13]] : (!fir.ref<!fir.array<*:f32>>) -> !fir.ref<!fir.array<?xf32>> ! 
CHECK: %[[VAL_15:.*]] = fir.shape %[[VAL_12]] : (index) -> !fir.shape<1> @@ -435,7 +435,7 @@ end subroutine ! CHECK: fir.call @_QPrdefault(%[[VAL_8]]#0) fastmath<contract> : (!fir.box<!fir.array<*:f32>>) -> () ! CHECK: cf.br ^bb5 ! CHECK: ^bb4: -! CHECK: %[[VAL_9:.*]] = arith.constant -1 : index +! CHECK: %[[VAL_9:.*]] = fir.assumed_size_extent : index ! CHECK: %[[VAL_10:.*]] = fir.box_addr %[[VAL_2]]#1 : (!fir.box<!fir.array<*:f32>>) -> !fir.ref<!fir.array<*:f32>> ! CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_10]] : (!fir.ref<!fir.array<*:f32>>) -> !fir.ref<!fir.array<?xf32>> ! CHECK: %[[VAL_12:.*]] = fir.shape %[[VAL_9]] : (index) -> !fir.shape<1> @@ -482,7 +482,7 @@ end subroutine ! CHECK: fir.call @_QPr1_implicit(%[[VAL_21]]#1) fastmath<contract> : (!fir.ref<!fir.array<?xf32>>) -> () ! CHECK: cf.br ^bb6 ! CHECK: ^bb5: -! CHECK: %[[VAL_22:.*]] = arith.constant -1 : index +! CHECK: %[[VAL_22:.*]] = fir.assumed_size_extent : index ! CHECK: %[[VAL_23:.*]] = fir.box_addr %[[VAL_2]]#1 : (!fir.box<!fir.array<*:f32>>) -> !fir.ref<!fir.array<*:f32>> ! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_23]] : (!fir.ref<!fir.array<*:f32>>) -> !fir.ref<!fir.array<?xf32>> ! CHECK: %[[VAL_25:.*]] = fir.shape %[[VAL_22]] : (index) -> !fir.shape<1> @@ -534,7 +534,7 @@ end subroutine ! CHECK: fir.call @_QPrc1_implicit(%[[VAL_26]]) fastmath<contract> : (!fir.boxchar<1>) -> () ! CHECK: cf.br ^bb6 ! CHECK: ^bb5: -! CHECK: %[[VAL_27:.*]] = arith.constant -1 : index +! CHECK: %[[VAL_27:.*]] = fir.assumed_size_extent : index ! CHECK: %[[VAL_28:.*]] = fir.box_addr %[[VAL_8]]#1 : (!fir.box<!fir.array<*:!fir.char<1,?>>>) -> !fir.ref<!fir.array<*:!fir.char<1,?>>> ! CHECK: %[[VAL_29:.*]] = fir.convert %[[VAL_28]] : (!fir.ref<!fir.array<*:!fir.char<1,?>>>) -> !fir.ref<!fir.array<?x!fir.char<1,?>>> ! CHECK: %[[VAL_30:.*]] = fir.shape %[[VAL_27]] : (index) -> !fir.shape<1> diff --git a/flang/test/Lower/Intrinsics/lbound.f90 b/flang/test/Lower/Intrinsics/lbound.f90 index a5ca2d3..75c11ff 100644 --- a/flang/test/Lower/Intrinsics/lbound.f90 +++ b/flang/test/Lower/Intrinsics/lbound.f90 @@ -40,7 +40,7 @@ end subroutine subroutine lbound_test_3(a, dim, res) real, dimension(2:10, 3:*) :: a integer(8):: dim, res -! CHECK: %[[VAL_0:.*]] = arith.constant -1 : index +! CHECK: %[[VAL_0:.*]] = fir.assumed_size_extent : index ! CHECK: %[[VAL_1:.*]] = fir.load %arg1 : !fir.ref<i64> ! CHECK: %[[VAL_2:.*]] = fir.shape_shift %{{.*}}, %{{.*}}, %{{.*}}, %[[VAL_0]] : (index, index, index, index) -> !fir.shapeshift<2> ! CHECK: %[[VAL_3:.*]] = fir.embox %arg0(%[[VAL_2]]) : (!fir.ref<!fir.array<9x?xf32>>, !fir.shapeshift<2>) -> !fir.box<!fir.array<9x?xf32>> diff --git a/flang/test/Lower/Intrinsics/ubound.f90 b/flang/test/Lower/Intrinsics/ubound.f90 index dae21ac..bc8cff8 100644 --- a/flang/test/Lower/Intrinsics/ubound.f90 +++ b/flang/test/Lower/Intrinsics/ubound.f90 @@ -48,7 +48,7 @@ end subroutine subroutine ubound_test_3(a, dim, res) real, dimension(10, 20, *) :: a integer(8):: dim, res -! CHECK: %[[VAL_0:.*]] = arith.constant -1 : index +! CHECK: %[[VAL_0:.*]] = fir.assumed_size_extent : index ! CHECK: %[[VAL_1:.*]] = fir.shape %{{.*}}, %{{.*}}, %[[VAL_0]] : (index, index, index) -> !fir.shape<3> ! CHECK: %[[VAL_2:.*]] = fir.embox %{{.*}}(%[[VAL_1]]) : (!fir.ref<!fir.array<10x20x?xf32>>, !fir.shape<3>) -> !fir.box<!fir.array<10x20x?xf32>> ! 
CHECK: %[[VAL_3:.*]] = fir.load %{{.*}} : !fir.ref<i64> diff --git a/flang/test/Lower/OpenACC/acc-data-operands-remapping-common.f90 b/flang/test/Lower/OpenACC/acc-data-operands-remapping-common.f90 new file mode 100644 index 0000000..1ab883e --- /dev/null +++ b/flang/test/Lower/OpenACC/acc-data-operands-remapping-common.f90 @@ -0,0 +1,43 @@ +! Test remapping of common blocks appearing in OpenACC data directives. + +! RUN: bbc -fopenacc -emit-hlfir %s -o - | FileCheck %s + +subroutine test + real :: x(100), y(100), overlap1(100), overlap2(100) + equivalence (x(50), overlap1) + equivalence (x(40), overlap2) + common /comm/ x, y + !$acc declare link(/comm/) + !$acc parallel loop copyin(/comm/) + do i = 1, 100 + x(i) = overlap1(i)*2+ overlap2(i) + enddo +end subroutine +! CHECK-LABEL: func.func @_QPtest() { +! CHECK: %[[ADDRESS_OF_0:.*]] = fir.address_of(@comm_) +! CHECK: %[[COPYIN_0:.*]] = acc.copyin varPtr(%[[ADDRESS_OF_0]] : !fir.ref<!fir.array<800xi8>>) -> !fir.ref<!fir.array<800xi8>> {name = "comm"} +! CHECK: acc.parallel combined(loop) dataOperands(%[[COPYIN_0]] : !fir.ref<!fir.array<800xi8>>) { +! CHECK: %[[CONSTANT_8:.*]] = arith.constant 196 : index +! CHECK: %[[COORDINATE_OF_4:.*]] = fir.coordinate_of %[[COPYIN_0]], %{{.*}} : (!fir.ref<!fir.array<800xi8>>, index) -> !fir.ref<i8> +! CHECK: %[[CONVERT_4:.*]] = fir.convert %[[COORDINATE_OF_4]] : (!fir.ref<i8>) -> !fir.ptr<!fir.array<100xf32>> +! CHECK: %[[SHAPE_4:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> +! CHECK: %[[DECLARE_5:.*]]:2 = hlfir.declare %[[CONVERT_4]](%[[SHAPE_4]]) storage(%[[COPYIN_0]][196]) {uniq_name = "_QFtestEoverlap1"} : (!fir.ptr<!fir.array<100xf32>>, !fir.shape<1>, !fir.ref<!fir.array<800xi8>>) -> (!fir.ptr<!fir.array<100xf32>>, !fir.ptr<!fir.array<100xf32>>) +! CHECK: %[[CONSTANT_9:.*]] = arith.constant 156 : index +! CHECK: %[[COORDINATE_OF_5:.*]] = fir.coordinate_of %[[COPYIN_0]], %{{.*}} : (!fir.ref<!fir.array<800xi8>>, index) -> !fir.ref<i8> +! CHECK: %[[CONVERT_5:.*]] = fir.convert %[[COORDINATE_OF_5]] : (!fir.ref<i8>) -> !fir.ptr<!fir.array<100xf32>> +! CHECK: %[[SHAPE_5:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> +! CHECK: %[[DECLARE_6:.*]]:2 = hlfir.declare %[[CONVERT_5]](%[[SHAPE_5]]) storage(%[[COPYIN_0]][156]) {uniq_name = "_QFtestEoverlap2"} : (!fir.ptr<!fir.array<100xf32>>, !fir.shape<1>, !fir.ref<!fir.array<800xi8>>) -> (!fir.ptr<!fir.array<100xf32>>, !fir.ptr<!fir.array<100xf32>>) +! CHECK: %[[CONSTANT_10:.*]] = arith.constant 0 : index +! CHECK: %[[COORDINATE_OF_6:.*]] = fir.coordinate_of %[[COPYIN_0]], %{{.*}} : (!fir.ref<!fir.array<800xi8>>, index) -> !fir.ref<i8> +! CHECK: %[[CONVERT_6:.*]] = fir.convert %[[COORDINATE_OF_6]] : (!fir.ref<i8>) -> !fir.ptr<!fir.array<100xf32>> +! CHECK: %[[SHAPE_6:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> +! CHECK: %[[DECLARE_7:.*]]:2 = hlfir.declare %[[CONVERT_6]](%[[SHAPE_6]]) storage(%[[COPYIN_0]][0]) {uniq_name = "_QFtestEx"} : (!fir.ptr<!fir.array<100xf32>>, !fir.shape<1>, !fir.ref<!fir.array<800xi8>>) -> (!fir.ptr<!fir.array<100xf32>>, !fir.ptr<!fir.array<100xf32>>) +! CHECK: %[[CONSTANT_11:.*]] = arith.constant 400 : index +! CHECK: %[[COORDINATE_OF_7:.*]] = fir.coordinate_of %[[COPYIN_0]], %{{.*}} : (!fir.ref<!fir.array<800xi8>>, index) -> !fir.ref<i8> +! CHECK: %[[CONVERT_7:.*]] = fir.convert %[[COORDINATE_OF_7]] : (!fir.ref<i8>) -> !fir.ref<!fir.array<100xf32>> +! CHECK: %[[SHAPE_7:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> +! 
CHECK: %[[DECLARE_8:.*]]:2 = hlfir.declare %[[CONVERT_7]](%[[SHAPE_7]]) storage(%[[COPYIN_0]][400]) {uniq_name = "_QFtestEy"} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>, !fir.ref<!fir.array<800xi8>>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>) +! CHECK: acc.loop combined(parallel) +! CHECK: %[[DESIGNATE_0:.*]] = hlfir.designate %[[DECLARE_5]]#0 +! CHECK: %[[DESIGNATE_1:.*]] = hlfir.designate %[[DECLARE_6]]#0 +! CHECK: %[[DESIGNATE_2:.*]] = hlfir.designate %[[DECLARE_7]]#0 diff --git a/flang/test/Lower/array-expression-assumed-size.f90 b/flang/test/Lower/array-expression-assumed-size.f90 index 2fbf315..a498148 100644 --- a/flang/test/Lower/array-expression-assumed-size.f90 +++ b/flang/test/Lower/array-expression-assumed-size.f90 @@ -19,7 +19,7 @@ end subroutine assumed_size_forall_test ! CHECK: %[[VAL_1A:.*]] = fir.convert %c10{{.*}} : (i64) -> index ! CHECK: %[[VAL_1B:.*]] = arith.cmpi sgt, %[[VAL_1A]], %c0{{.*}} : index ! CHECK: %[[VAL_1:.*]] = arith.select %[[VAL_1B]], %[[VAL_1A]], %c0{{.*}} : index -! CHECK: %[[VAL_2:.*]] = arith.constant -1 : index +! CHECK: %[[VAL_2:.*]] = fir.assumed_size_extent : index ! CHECK: %[[VAL_3:.*]] = arith.constant 1 : index ! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i64 ! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_4]] : (i64) -> index @@ -82,7 +82,7 @@ end subroutine assumed_size_forall_test ! CHECK: %[[VAL_2A:.*]] = fir.convert %c10{{.*}} : (i64) -> index ! CHECK: %[[VAL_2B:.*]] = arith.cmpi sgt, %[[VAL_2A]], %c0{{.*}} : index ! CHECK: %[[VAL_2:.*]] = arith.select %[[VAL_2B]], %[[VAL_2A]], %c0{{.*}} : index -! CHECK: %[[VAL_3:.*]] = arith.constant -1 : index +! CHECK: %[[VAL_3:.*]] = fir.assumed_size_extent : index ! CHECK: %[[VAL_4:.*]] = arith.constant 2 : i32 ! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_4]] : (i32) -> index ! CHECK: %[[VAL_6:.*]] = arith.constant 6 : i32 @@ -149,7 +149,7 @@ end subroutine assumed_size_forall_test ! PostOpt-DAG: %[[VAL_4:.*]] = arith.constant 0 : index ! PostOpt-DAG: %[[VAL_5:.*]] = arith.constant 3 : index ! PostOpt-DAG: %[[VAL_6:.*]] = arith.constant 4 : index -! PostOpt-DAG: %[[VAL_7:.*]] = arith.constant -1 : index +! PostOpt-DAG: %[[VAL_7:.*]] = fir.assumed_size_extent : index ! PostOpt: %[[VAL_8:.*]] = fir.shape %[[VAL_1]], %[[VAL_7]] : (index, index) -> !fir.shape<2> ! PostOpt: %[[VAL_9:.*]] = fir.slice %[[VAL_2]], %[[VAL_1]], %[[VAL_2]], %[[VAL_2]], %[[VAL_3]], %[[VAL_2]] : (index, index, index, index, index, index) -> !fir.slice<2> ! PostOpt: %[[VAL_10:.*]] = fir.allocmem !fir.array<10x?xi32>, %[[VAL_3]] @@ -227,8 +227,8 @@ end subroutine assumed_size_forall_test ! PostOpt-DAG: %[[VAL_4:.*]] = arith.constant 1 : index ! PostOpt-DAG: %[[VAL_5:.*]] = arith.constant 0 : index ! PostOpt-DAG: %[[VAL_6:.*]] = arith.constant 5 : index -! PostOpt-DAG: %[[VAL_8:.*]] = arith.constant -1 : index ! PostOpt: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, bindc_name = "i"} +! PostOpt: %[[VAL_8:.*]] = fir.assumed_size_extent : index ! PostOpt: %[[VAL_9:.*]] = fir.shape %[[VAL_2]], %[[VAL_8]] : (index, index) -> !fir.shape<2> ! PostOpt: %[[VAL_10:.*]] = fir.allocmem !fir.array<10x?xi32>, %[[VAL_4]] ! PostOpt: br ^bb1(%[[VAL_5]], %[[VAL_4]] : index, index) diff --git a/flang/test/Lower/entry-statement.f90 b/flang/test/Lower/entry-statement.f90 index 83d2d32..f1e535a 100644 --- a/flang/test/Lower/entry-statement.f90 +++ b/flang/test/Lower/entry-statement.f90 @@ -491,7 +491,7 @@ end subroutine ! CHECK-LABEL: func.func @_QPentry_with_assumed_size( ! 
CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.array<?xf32>> {fir.bindc_name = "x"}) { ! CHECK: %[[VAL_1:.*]] = fir.dummy_scope : !fir.dscope -! CHECK: %[[VAL_2:.*]] = arith.constant -1 : index +! CHECK: %[[VAL_2:.*]] = fir.assumed_size_extent : index ! CHECK: %[[VAL_3:.*]] = fir.shape %[[VAL_2]] : (index) -> !fir.shape<1> ! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_3]]) dummy_scope %[[VAL_1]] {uniq_name = "_QFassumed_sizeEx"} : (!fir.ref<!fir.array<?xf32>>, !fir.shape<1>, !fir.dscope) -> (!fir.box<!fir.array<?xf32>>, !fir.ref<!fir.array<?xf32>>) ! CHECK: cf.br ^bb1 diff --git a/flang/test/Semantics/OpenACC/acc-common.f90 b/flang/test/Semantics/OpenACC/acc-common.f90 new file mode 100644 index 0000000..31c4d19 --- /dev/null +++ b/flang/test/Semantics/OpenACC/acc-common.f90 @@ -0,0 +1,41 @@ +! RUN: %python %S/../test_errors.py %s %flang -fopenacc +module acc_common_decl + implicit none + integer a + common /a_common/ a +!$acc declare create (/a_common/) + data a/42/ +end module acc_common_decl + +module acc_common_another + implicit none + integer c, d + common /a_common/ c, d +!$acc declare create (/a_common/) +end module acc_common_another + +module acc_common_intermediate + use acc_common_decl + implicit none + integer b + common /b_common/ b +!$acc declare create (/b_common/) +end module acc_common_intermediate + +program acc_decl_test + use acc_common_intermediate + use acc_common_another + implicit none + + a = 1 + b = 10 +!$acc update device (/a_common/) + a = 2 +!$acc update device (/b_common/) + b = 20 +!$acc update device (/a_common/) + c = 3 + d = 30 +!ERROR: Could not find COMMON block 'a_common_bad' used in OpenACC directive +!$acc update device (/a_common_bad/) +end program diff --git a/libc/src/__support/math/atan2f.h b/libc/src/__support/math/atan2f.h index e3b1932..0133d12 100644 --- a/libc/src/__support/math/atan2f.h +++ b/libc/src/__support/math/atan2f.h @@ -18,9 +18,11 @@ #include "src/__support/FPUtil/nearest_integer.h" #include "src/__support/macros/config.h" #include "src/__support/macros/optimization.h" // LIBC_UNLIKELY +#include "src/__support/macros/properties/cpu_features.h" // LIBC_TARGET_CPU_HAS_FMA #if defined(LIBC_MATH_HAS_SKIP_ACCURATE_PASS) && \ - defined(LIBC_MATH_HAS_INTERMEDIATE_COMP_IN_FLOAT) + defined(LIBC_MATH_HAS_INTERMEDIATE_COMP_IN_FLOAT) && \ + defined(LIBC_TARGET_CPU_HAS_FMA_FLOAT) // We use float-float implementation to reduce size. 
#include "atan2f_float.h" diff --git a/libc/test/UnitTest/FPMatcher.h b/libc/test/UnitTest/FPMatcher.h index f74276f..430727e 100644 --- a/libc/test/UnitTest/FPMatcher.h +++ b/libc/test/UnitTest/FPMatcher.h @@ -16,6 +16,7 @@ #include "src/__support/FPUtil/fpbits_str.h" #include "src/__support/libc_errno.h" #include "src/__support/macros/config.h" +#include "src/__support/macros/optimization.h" #include "src/__support/macros/properties/architectures.h" #include "test/UnitTest/ErrnoCheckingTest.h" #include "test/UnitTest/RoundingModeUtils.h" @@ -294,42 +295,46 @@ private: #define EXPECT_MATH_ERRNO(expected) \ do { \ - if (math_errhandling & MATH_ERRNO) { \ - int actual = libc_errno; \ - libc_errno = 0; \ - EXPECT_EQ(actual, expected); \ - } \ + if ((LIBC_MATH & LIBC_MATH_NO_ERRNO) == 0) \ + if (math_errhandling & MATH_ERRNO) { \ + int actual = libc_errno; \ + libc_errno = 0; \ + EXPECT_EQ(actual, expected); \ + } \ } while (0) #define ASSERT_MATH_ERRNO(expected) \ do { \ - if (math_errhandling & MATH_ERRNO) { \ - int actual = libc_errno; \ - libc_errno = 0; \ - ASSERT_EQ(actual, expected); \ - } \ + if ((LIBC_MATH & LIBC_MATH_NO_ERRNO) == 0) \ + if (math_errhandling & MATH_ERRNO) { \ + int actual = libc_errno; \ + libc_errno = 0; \ + ASSERT_EQ(actual, expected); \ + } \ } while (0) #define EXPECT_FP_EXCEPTION(expected) \ do { \ - if (math_errhandling & MATH_ERREXCEPT) { \ - EXPECT_EQ( \ - LIBC_NAMESPACE::fputil::test_except( \ - static_cast<int>(FE_ALL_EXCEPT)) & \ - ((expected) ? (expected) : static_cast<int>(FE_ALL_EXCEPT)), \ - (expected)); \ - } \ + if ((LIBC_MATH & LIBC_MATH_NO_EXCEPT) == 0) \ + if (math_errhandling & MATH_ERREXCEPT) { \ + EXPECT_EQ( \ + LIBC_NAMESPACE::fputil::test_except( \ + static_cast<int>(FE_ALL_EXCEPT)) & \ + ((expected) ? (expected) : static_cast<int>(FE_ALL_EXCEPT)), \ + (expected)); \ + } \ } while (0) #define ASSERT_FP_EXCEPTION(expected) \ do { \ - if (math_errhandling & MATH_ERREXCEPT) { \ - ASSERT_EQ( \ - LIBC_NAMESPACE::fputil::test_except( \ - static_cast<int>(FE_ALL_EXCEPT)) & \ - ((expected) ? (expected) : static_cast<int>(FE_ALL_EXCEPT)), \ - (expected)); \ - } \ + if ((LIBC_MATH & LIBC_MATH_NO_EXCEPT) == 0) \ + if (math_errhandling & MATH_ERREXCEPT) { \ + ASSERT_EQ( \ + LIBC_NAMESPACE::fputil::test_except( \ + static_cast<int>(FE_ALL_EXCEPT)) & \ + ((expected) ? 
(expected) : static_cast<int>(FE_ALL_EXCEPT)), \ + (expected)); \ + } \ } while (0) #define EXPECT_FP_EQ_WITH_EXCEPTION(expected_val, actual_val, expected_except) \ diff --git a/libc/test/src/math/acos_test.cpp b/libc/test/src/math/acos_test.cpp index 1404887..8678fe6 100644 --- a/libc/test/src/math/acos_test.cpp +++ b/libc/test/src/math/acos_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/acos.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 8 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcAcosTest = LIBC_NAMESPACE::testing::FPTest<double>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -46,7 +53,7 @@ TEST_F(LlvmLibcAcosTest, InDoubleRange) { ++count; if (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Acos, x, result, - 0.5, rounding_mode)) { + TOLERANCE + 0.5, rounding_mode)) { ++fails; while (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Acos, x, result, tol, rounding_mode)) { diff --git a/libc/test/src/math/acosf16_test.cpp b/libc/test/src/math/acosf16_test.cpp index f4890c81..ca33550 100644 --- a/libc/test/src/math/acosf16_test.cpp +++ b/libc/test/src/math/acosf16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/acosf16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcAcosf16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -28,7 +35,7 @@ TEST_F(LlvmLibcAcosf16Test, PositiveRange) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Acos, x, - LIBC_NAMESPACE::acosf16(x), 0.5); + LIBC_NAMESPACE::acosf16(x), TOLERANCE + 0.5); } } @@ -37,6 +44,6 @@ TEST_F(LlvmLibcAcosf16Test, NegativeRange) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Acos, x, - LIBC_NAMESPACE::acosf16(x), 0.5); + LIBC_NAMESPACE::acosf16(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/acosf_test.cpp b/libc/test/src/math/acosf_test.cpp index 3b45749..40b8702 100644 --- a/libc/test/src/math/acosf_test.cpp +++ b/libc/test/src/math/acosf_test.cpp @@ -10,11 +10,18 @@ #include "hdr/math_macros.h" #include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/acosf.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + namespace mpfr = LIBC_NAMESPACE::testing::mpfr; using LlvmLibcAcosfTest = LIBC_NAMESPACE::testing::FPTest<float>; @@ -72,8 +79,8 @@ TEST_F(LlvmLibcAcosfTest, SpecificBitPatterns) { for (int i = 0; i < N; ++i) { float x = FPBits(INPUTS[i]).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Acos, x, - LIBC_NAMESPACE::acosf(x), 0.5); + LIBC_NAMESPACE::acosf(x), TOLERANCE + 0.5); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Acos, -x, - LIBC_NAMESPACE::acosf(-x), 0.5); + 
LIBC_NAMESPACE::acosf(-x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/acoshf_test.cpp b/libc/test/src/math/acoshf_test.cpp index 506f176..a0e9b39 100644 --- a/libc/test/src/math/acoshf_test.cpp +++ b/libc/test/src/math/acoshf_test.cpp @@ -10,11 +10,18 @@ #include "hdr/math_macros.h" #include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/acoshf.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcAcoshfTest = LIBC_NAMESPACE::testing::FPTest<float>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -68,6 +75,6 @@ TEST_F(LlvmLibcAcoshfTest, SpecificBitPatterns) { for (int i = 0; i < N; ++i) { float x = FPBits(INPUTS[i]).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Acosh, x, - LIBC_NAMESPACE::acoshf(x), 0.5); + LIBC_NAMESPACE::acoshf(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/asin_test.cpp b/libc/test/src/math/asin_test.cpp index 03ae963..4e36384 100644 --- a/libc/test/src/math/asin_test.cpp +++ b/libc/test/src/math/asin_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/asin.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 6 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcAsinTest = LIBC_NAMESPACE::testing::FPTest<double>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -47,7 +54,7 @@ TEST_F(LlvmLibcAsinTest, InDoubleRange) { ++count; if (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Asin, x, result, - 0.5, rounding_mode)) { + TOLERANCE + 0.5, rounding_mode)) { ++fails; while (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Asin, x, result, tol, rounding_mode)) { @@ -72,6 +79,7 @@ TEST_F(LlvmLibcAsinTest, InDoubleRange) { tlog << " Test Rounding To Nearest...\n"; test(mpfr::RoundingMode::Nearest); +#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS tlog << " Test Rounding Downward...\n"; test(mpfr::RoundingMode::Downward); @@ -80,4 +88,5 @@ TEST_F(LlvmLibcAsinTest, InDoubleRange) { tlog << " Test Rounding Toward Zero...\n"; test(mpfr::RoundingMode::TowardZero); +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS } diff --git a/libc/test/src/math/asinf_test.cpp b/libc/test/src/math/asinf_test.cpp index 824bc1e..20702c5 100644 --- a/libc/test/src/math/asinf_test.cpp +++ b/libc/test/src/math/asinf_test.cpp @@ -11,11 +11,18 @@ #include "hdr/math_macros.h" #include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/asinf.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcAsinfTest = LIBC_NAMESPACE::testing::FPTest<float>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -68,8 +75,8 @@ TEST_F(LlvmLibcAsinfTest, SpecificBitPatterns) { for (int i = 0; i < N; ++i) { float x = FPBits(INPUTS[i]).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Asin, x, - LIBC_NAMESPACE::asinf(x), 0.5); + 
LIBC_NAMESPACE::asinf(x), TOLERANCE + 0.5); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Asin, -x, - LIBC_NAMESPACE::asinf(-x), 0.5); + LIBC_NAMESPACE::asinf(-x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/asinhf16_test.cpp b/libc/test/src/math/asinhf16_test.cpp index 929d137..8d0f754 100644 --- a/libc/test/src/math/asinhf16_test.cpp +++ b/libc/test/src/math/asinhf16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/asinhf16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcAsinhf16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -28,7 +35,8 @@ TEST_F(LlvmLibcAsinhf16Test, PositiveRange) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Asinh, x, - LIBC_NAMESPACE::asinhf16(x), 0.5); + LIBC_NAMESPACE::asinhf16(x), + TOLERANCE + 0.5); } } @@ -37,6 +45,7 @@ TEST_F(LlvmLibcAsinhf16Test, NegativeRange) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Asinh, x, - LIBC_NAMESPACE::asinhf16(x), 0.5); + LIBC_NAMESPACE::asinhf16(x), + TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/asinhf_test.cpp b/libc/test/src/math/asinhf_test.cpp index 4681aad..9d9c042 100644 --- a/libc/test/src/math/asinhf_test.cpp +++ b/libc/test/src/math/asinhf_test.cpp @@ -9,11 +9,18 @@ #include "hdr/math_macros.h" #include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/asinhf.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcAsinhfTest = LIBC_NAMESPACE::testing::FPTest<float>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -68,8 +75,8 @@ TEST_F(LlvmLibcAsinhfTest, SpecificBitPatterns) { for (int i = 0; i < N; ++i) { float x = FPBits(INPUTS[i]).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Asinh, x, - LIBC_NAMESPACE::asinhf(x), 0.5); + LIBC_NAMESPACE::asinhf(x), TOLERANCE + 0.5); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Asinh, -x, - LIBC_NAMESPACE::asinhf(-x), 0.5); + LIBC_NAMESPACE::asinhf(-x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/atan2f_test.cpp b/libc/test/src/math/atan2f_test.cpp index 50ab382..56b6967 100644 --- a/libc/test/src/math/atan2f_test.cpp +++ b/libc/test/src/math/atan2f_test.cpp @@ -8,11 +8,18 @@ #include "hdr/math_macros.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/atan2f.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcAtan2fTest = LIBC_NAMESPACE::testing::FPTest<float>; using LIBC_NAMESPACE::testing::tlog; @@ -36,16 +43,20 @@ TEST_F(LlvmLibcAtan2fTest, TrickyInputs) { float x = INPUTS[i].x; float y = INPUTS[i].y; ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Atan2, INPUTS[i], - LIBC_NAMESPACE::atan2f(x, y), 0.5); + 
LIBC_NAMESPACE::atan2f(x, y), + TOLERANCE + 0.5); INPUTS[i].x = -INPUTS[i].x; ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Atan2, INPUTS[i], - LIBC_NAMESPACE::atan2f(-x, y), 0.5); + LIBC_NAMESPACE::atan2f(-x, y), + TOLERANCE + 0.5); INPUTS[i].y = -INPUTS[i].y; ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Atan2, INPUTS[i], - LIBC_NAMESPACE::atan2f(-x, -y), 0.5); + LIBC_NAMESPACE::atan2f(-x, -y), + TOLERANCE + 0.5); INPUTS[i].x = -INPUTS[i].x; ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Atan2, INPUTS[i], - LIBC_NAMESPACE::atan2f(x, -y), 0.5); + LIBC_NAMESPACE::atan2f(x, -y), + TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/atanf16_test.cpp b/libc/test/src/math/atanf16_test.cpp index fa383e7..e91133f 100644 --- a/libc/test/src/math/atanf16_test.cpp +++ b/libc/test/src/math/atanf16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/atanf16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcAtanf16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +34,7 @@ TEST_F(LlvmLibcAtanf16Test, PositiveRange) { for (uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Atan, x, - LIBC_NAMESPACE::atanf16(x), 0.5); + LIBC_NAMESPACE::atanf16(x), TOLERANCE + 0.5); } } @@ -35,6 +42,6 @@ TEST_F(LlvmLibcAtanf16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Atan, x, - LIBC_NAMESPACE::atanf16(x), 0.5); + LIBC_NAMESPACE::atanf16(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/cbrt_test.cpp b/libc/test/src/math/cbrt_test.cpp index 2e2de16..61b8583 100644 --- a/libc/test/src/math/cbrt_test.cpp +++ b/libc/test/src/math/cbrt_test.cpp @@ -8,11 +8,18 @@ #include "hdr/math_macros.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/cbrt.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcCbrtTest = LIBC_NAMESPACE::testing::FPTest<double>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -49,7 +56,7 @@ TEST_F(LlvmLibcCbrtTest, InDoubleRange) { ++tested; if (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Cbrt, x, result, - 0.5, rounding_mode)) { + TOLERANCE + 0.5, rounding_mode)) { ++fails; while (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Cbrt, x, result, ulp, rounding_mode)) { @@ -98,8 +105,8 @@ TEST_F(LlvmLibcCbrtTest, SpecialValues) { for (double v : INPUTS) { double x = FPBits(v).get_val(); ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Cbrt, x, - LIBC_NAMESPACE::cbrt(x), 0.5); + LIBC_NAMESPACE::cbrt(x), TOLERANCE + 0.5); ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Cbrt, -x, - LIBC_NAMESPACE::cbrt(-x), 0.5); + LIBC_NAMESPACE::cbrt(-x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/cos_test.cpp b/libc/test/src/math/cos_test.cpp index e2d4791..40c43f9 100644 --- a/libc/test/src/math/cos_test.cpp +++ b/libc/test/src/math/cos_test.cpp @@ 
-7,11 +7,18 @@ //===----------------------------------------------------------------------===// #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/cos.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcCosTest = LIBC_NAMESPACE::testing::FPTest<double>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -57,7 +64,7 @@ TEST_F(LlvmLibcCosTest, TrickyInputs) { for (int i = 0; i < N; ++i) { double x = INPUTS[i]; ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Cos, x, - LIBC_NAMESPACE::cos(x), 0.5); + LIBC_NAMESPACE::cos(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/cosf16_test.cpp b/libc/test/src/math/cosf16_test.cpp index b744e78..fe6f4ab 100644 --- a/libc/test/src/math/cosf16_test.cpp +++ b/libc/test/src/math/cosf16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/cosf16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcCosf16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +34,7 @@ TEST_F(LlvmLibcCosf16Test, PositiveRange) { for (uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Cos, x, - LIBC_NAMESPACE::cosf16(x), 0.5); + LIBC_NAMESPACE::cosf16(x), TOLERANCE + 0.5); } } @@ -35,6 +42,6 @@ TEST_F(LlvmLibcCosf16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Cos, x, - LIBC_NAMESPACE::cosf16(x), 0.5); + LIBC_NAMESPACE::cosf16(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/cosf_test.cpp b/libc/test/src/math/cosf_test.cpp index 6afaa42..d5d5dc3 100644 --- a/libc/test/src/math/cosf_test.cpp +++ b/libc/test/src/math/cosf_test.cpp @@ -10,12 +10,21 @@ #include "hdr/math_macros.h" #include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/cosf.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "test/src/math/sdcomp26094.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 3 +#define FP_ASSERT ASSERT_MPFR_MATCH +#else +#define TOLERANCE 0 +#define FP_ASSERT ASSERT_MPFR_MATCH_ALL_ROUNDING +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LIBC_NAMESPACE::testing::SDCOMP26094_VALUES; using LlvmLibcCosfTest = LIBC_NAMESPACE::testing::FPTest<float>; @@ -45,8 +54,8 @@ TEST_F(LlvmLibcCosfTest, InFloatRange) { float x = FPBits(v).get_val(); if (FPBits(v).is_nan() || FPBits(v).is_inf()) continue; - ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Cos, x, - LIBC_NAMESPACE::cosf(x), 0.5); + FP_ASSERT(mpfr::Operation::Cos, x, LIBC_NAMESPACE::cosf(x), + TOLERANCE + 0.5); } } @@ -99,10 +108,10 @@ TEST_F(LlvmLibcCosfTest, SpecificBitPatterns) { for (int i = 0; i < N; ++i) { float x = FPBits(INPUTS[i]).get_val(); - EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Cos, x, - LIBC_NAMESPACE::cosf(x), 0.5); 
- EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Cos, -x, - LIBC_NAMESPACE::cosf(-x), 0.5); + FP_ASSERT(mpfr::Operation::Cos, x, LIBC_NAMESPACE::cosf(x), + TOLERANCE + 0.5); + FP_ASSERT(mpfr::Operation::Cos, -x, LIBC_NAMESPACE::cosf(-x), + TOLERANCE + 0.5); } } @@ -111,6 +120,7 @@ TEST_F(LlvmLibcCosfTest, SpecificBitPatterns) { TEST_F(LlvmLibcCosfTest, SDCOMP_26094) { for (uint32_t v : SDCOMP26094_VALUES) { float x = FPBits(v).get_val(); - ASSERT_MPFR_MATCH(mpfr::Operation::Cos, x, LIBC_NAMESPACE::cosf(x), 0.5); + FP_ASSERT(mpfr::Operation::Cos, x, LIBC_NAMESPACE::cosf(x), + TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/coshf16_test.cpp b/libc/test/src/math/coshf16_test.cpp index a0d1fd2..8e8f55e 100644 --- a/libc/test/src/math/coshf16_test.cpp +++ b/libc/test/src/math/coshf16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/coshf16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcCoshf16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +34,7 @@ TEST_F(LlvmLibcCoshf16Test, PositiveRange) { for (uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Cosh, x, - LIBC_NAMESPACE::coshf16(x), 0.5); + LIBC_NAMESPACE::coshf16(x), TOLERANCE + 0.5); } } @@ -35,6 +42,6 @@ TEST_F(LlvmLibcCoshf16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Cosh, x, - LIBC_NAMESPACE::coshf16(x), 0.5); + LIBC_NAMESPACE::coshf16(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/erff_test.cpp b/libc/test/src/math/erff_test.cpp index c40aacf..17ff6c4 100644 --- a/libc/test/src/math/erff_test.cpp +++ b/libc/test/src/math/erff_test.cpp @@ -7,13 +7,19 @@ //===----------------------------------------------------------------------===// #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/erff.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcErffTest = LIBC_NAMESPACE::testing::FPTest<float>; @@ -37,9 +43,9 @@ TEST_F(LlvmLibcErffTest, TrickyInputs) { for (int i = 0; i < N; ++i) { float x = FPBits(INPUTS[i]).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Erf, x, - LIBC_NAMESPACE::erff(x), 0.5); + LIBC_NAMESPACE::erff(x), TOLERANCE + 0.5); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Erf, -x, - LIBC_NAMESPACE::erff(-x), 0.5); + LIBC_NAMESPACE::erff(-x), TOLERANCE + 0.5); } } @@ -83,10 +89,10 @@ TEST_F(LlvmLibcErffTest, InFloatRange) { } } } - tlog << " Log failed: " << fails << "/" << count << "/" << cc - << " tests.\n"; - tlog << " Max ULPs is at most: " << static_cast<uint64_t>(tol) << ".\n"; if (fails) { + tlog << " Log failed: " << fails << "/" << count << "/" << cc + << " tests.\n"; + tlog << " Max ULPs is at most: " << static_cast<uint64_t>(tol) << ".\n"; 
EXPECT_MPFR_MATCH(mpfr::Operation::Erf, mx, mr, 0.5, rounding_mode); } }; diff --git a/libc/test/src/math/exp10_test.cpp b/libc/test/src/math/exp10_test.cpp index 2ddcef0..4257dd0 100644 --- a/libc/test/src/math/exp10_test.cpp +++ b/libc/test/src/math/exp10_test.cpp @@ -7,13 +7,19 @@ //===----------------------------------------------------------------------===// #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/exp10.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcExp10Test = LIBC_NAMESPACE::testing::FPTest<double>; @@ -79,7 +85,7 @@ TEST_F(LlvmLibcExp10Test, TrickyInputs) { for (int i = 0; i < N; ++i) { double x = FPBits(INPUTS[i]).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp10, x, - LIBC_NAMESPACE::exp10(x), 0.5); + LIBC_NAMESPACE::exp10(x), TOLERANCE + 0.5); } } @@ -112,7 +118,7 @@ TEST_F(LlvmLibcExp10Test, InDoubleRange) { ++count; if (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Exp10, x, result, - 0.5, rounding_mode)) { + TOLERANCE + 0.5, rounding_mode)) { ++fails; while (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Exp10, x, result, tol, rounding_mode)) { @@ -126,10 +132,10 @@ TEST_F(LlvmLibcExp10Test, InDoubleRange) { } } } - tlog << " Exp10 failed: " << fails << "/" << count << "/" << cc - << " tests.\n"; - tlog << " Max ULPs is at most: " << static_cast<uint64_t>(tol) << ".\n"; if (fails) { + tlog << " Exp10 failed: " << fails << "/" << count << "/" << cc + << " tests.\n"; + tlog << " Max ULPs is at most: " << static_cast<uint64_t>(tol) << ".\n"; EXPECT_MPFR_MATCH(mpfr::Operation::Exp10, mx, mr, 0.5, rounding_mode); } }; diff --git a/libc/test/src/math/exp10f16_test.cpp b/libc/test/src/math/exp10f16_test.cpp index fc49331..7bd0850 100644 --- a/libc/test/src/math/exp10f16_test.cpp +++ b/libc/test/src/math/exp10f16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/exp10f16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcExp10f16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +34,8 @@ TEST_F(LlvmLibcExp10f16Test, PositiveRange) { for (uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp10, x, - LIBC_NAMESPACE::exp10f16(x), 0.5); + LIBC_NAMESPACE::exp10f16(x), + TOLERANCE + 0.5); } } @@ -35,6 +43,7 @@ TEST_F(LlvmLibcExp10f16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp10, x, - LIBC_NAMESPACE::exp10f16(x), 0.5); + LIBC_NAMESPACE::exp10f16(x), + TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/exp10m1f16_test.cpp b/libc/test/src/math/exp10m1f16_test.cpp index 41bb12f..4ba16e0 100644 --- a/libc/test/src/math/exp10m1f16_test.cpp +++ b/libc/test/src/math/exp10m1f16_test.cpp @@ -6,11 +6,18 @@ // 
//===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/exp10m1f16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcExp10m1f16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +34,8 @@ TEST_F(LlvmLibcExp10m1f16Test, PositiveRange) { for (uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp10m1, x, - LIBC_NAMESPACE::exp10m1f16(x), 0.5); + LIBC_NAMESPACE::exp10m1f16(x), + TOLERANCE + 0.5); } } @@ -35,6 +43,7 @@ TEST_F(LlvmLibcExp10m1f16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp10m1, x, - LIBC_NAMESPACE::exp10m1f16(x), 0.5); + LIBC_NAMESPACE::exp10m1f16(x), + TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/exp10m1f_test.cpp b/libc/test/src/math/exp10m1f_test.cpp index 009c85d..b41a412 100644 --- a/libc/test/src/math/exp10m1f_test.cpp +++ b/libc/test/src/math/exp10m1f_test.cpp @@ -7,14 +7,20 @@ //===----------------------------------------------------------------------===// #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/CPP/array.h" #include "src/__support/libc_errno.h" +#include "src/__support/macros/optimization.h" #include "src/math/exp10m1f.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcExp10m1fTest = LIBC_NAMESPACE::testing::FPTest<float>; @@ -70,7 +76,8 @@ TEST_F(LlvmLibcExp10m1fTest, TrickyInputs) { for (float x : INPUTS) { EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp10m1, x, - LIBC_NAMESPACE::exp10m1f(x), 0.5); + LIBC_NAMESPACE::exp10m1f(x), + TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/exp2_test.cpp b/libc/test/src/math/exp2_test.cpp index a3dcb58..324cafd 100644 --- a/libc/test/src/math/exp2_test.cpp +++ b/libc/test/src/math/exp2_test.cpp @@ -101,10 +101,10 @@ TEST_F(LlvmLibcExp2Test, InDoubleRange) { } } } - tlog << " Exp2 failed: " << fails << "/" << count << "/" << cc - << " tests.\n"; - tlog << " Max ULPs is at most: " << static_cast<uint64_t>(tol) << ".\n"; if (fails) { + tlog << " Exp2 failed: " << fails << "/" << count << "/" << cc + << " tests.\n"; + tlog << " Max ULPs is at most: " << static_cast<uint64_t>(tol) << ".\n"; EXPECT_MPFR_MATCH(mpfr::Operation::Exp2, mx, mr, 0.5, rounding_mode); } }; diff --git a/libc/test/src/math/exp2f16_test.cpp b/libc/test/src/math/exp2f16_test.cpp index 503d8c2..d28632a 100644 --- a/libc/test/src/math/exp2f16_test.cpp +++ b/libc/test/src/math/exp2f16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/exp2f16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using 
LlvmLibcExp2f16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +34,7 @@ TEST_F(LlvmLibcExp2f16Test, PositiveRange) { for (uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp2, x, - LIBC_NAMESPACE::exp2f16(x), 0.5); + LIBC_NAMESPACE::exp2f16(x), TOLERANCE + 0.5); } } @@ -35,6 +42,6 @@ TEST_F(LlvmLibcExp2f16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp2, x, - LIBC_NAMESPACE::exp2f16(x), 0.5); + LIBC_NAMESPACE::exp2f16(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/exp2f_test.cpp b/libc/test/src/math/exp2f_test.cpp index 94141dda..5e5fd69 100644 --- a/libc/test/src/math/exp2f_test.cpp +++ b/libc/test/src/math/exp2f_test.cpp @@ -7,14 +7,20 @@ //===----------------------------------------------------------------------===// #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" #include "src/__support/libc_errno.h" +#include "src/__support/macros/optimization.h" #include "src/math/exp2f.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcExp2fTest = LIBC_NAMESPACE::testing::FPTest<float>; @@ -71,7 +77,7 @@ TEST_F(LlvmLibcExp2fTest, TrickyInputs) { libc_errno = 0; float x = FPBits(INPUTS[i]).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp2, x, - LIBC_NAMESPACE::exp2f(x), 0.5); + LIBC_NAMESPACE::exp2f(x), TOLERANCE + 0.5); EXPECT_MATH_ERRNO(0); } } diff --git a/libc/test/src/math/exp2m1f16_test.cpp b/libc/test/src/math/exp2m1f16_test.cpp index bf29911..526484d8 100644 --- a/libc/test/src/math/exp2m1f16_test.cpp +++ b/libc/test/src/math/exp2m1f16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/exp2m1f16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcExp2m1f16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +34,8 @@ TEST_F(LlvmLibcExp2m1f16Test, PositiveRange) { for (uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp2m1, x, - LIBC_NAMESPACE::exp2m1f16(x), 0.5); + LIBC_NAMESPACE::exp2m1f16(x), + TOLERANCE + 0.5); } } @@ -35,6 +43,7 @@ TEST_F(LlvmLibcExp2m1f16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp2m1, x, - LIBC_NAMESPACE::exp2m1f16(x), 0.5); + LIBC_NAMESPACE::exp2m1f16(x), + TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/exp2m1f_test.cpp b/libc/test/src/math/exp2m1f_test.cpp index 7e9f6b5..16b20d7 100644 --- a/libc/test/src/math/exp2m1f_test.cpp +++ b/libc/test/src/math/exp2m1f_test.cpp @@ -7,15 +7,21 @@ //===----------------------------------------------------------------------===// #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include 
"src/__support/CPP/array.h" #include "src/__support/FPUtil/FPBits.h" #include "src/__support/libc_errno.h" +#include "src/__support/macros/optimization.h" #include "src/math/exp2m1f.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcExp2m1fTest = LIBC_NAMESPACE::testing::FPTest<float>; @@ -39,7 +45,7 @@ TEST_F(LlvmLibcExp2m1fTest, TrickyInputs) { for (float x : INPUTS) { EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp2m1, x, - LIBC_NAMESPACE::exp2m1f(x), 0.5); + LIBC_NAMESPACE::exp2m1f(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/exp_test.cpp b/libc/test/src/math/exp_test.cpp index 854bb51..5a785fb 100644 --- a/libc/test/src/math/exp_test.cpp +++ b/libc/test/src/math/exp_test.cpp @@ -7,13 +7,19 @@ //===----------------------------------------------------------------------===// #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/exp.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcExpTest = LIBC_NAMESPACE::testing::FPTest<double>; @@ -52,7 +58,7 @@ TEST_F(LlvmLibcExpTest, TrickyInputs) { for (int i = 0; i < N; ++i) { double x = FPBits(INPUTS[i]).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp, x, - LIBC_NAMESPACE::exp(x), 0.5); + LIBC_NAMESPACE::exp(x), TOLERANCE + 0.5); } } @@ -85,7 +91,7 @@ TEST_F(LlvmLibcExpTest, InDoubleRange) { ++count; // ASSERT_MPFR_MATCH(mpfr::Operation::Log, x, result, 0.5); if (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Exp, x, result, - 0.5, rounding_mode)) { + TOLERANCE + 0.5, rounding_mode)) { ++fails; while (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Exp, x, result, tol, rounding_mode)) { @@ -99,10 +105,10 @@ TEST_F(LlvmLibcExpTest, InDoubleRange) { } } } - tlog << " Exp failed: " << fails << "/" << count << "/" << cc - << " tests.\n"; - tlog << " Max ULPs is at most: " << static_cast<uint64_t>(tol) << ".\n"; if (fails) { + tlog << " Exp failed: " << fails << "/" << count << "/" << cc + << " tests.\n"; + tlog << " Max ULPs is at most: " << static_cast<uint64_t>(tol) << ".\n"; EXPECT_MPFR_MATCH(mpfr::Operation::Exp, mx, mr, 0.5, rounding_mode); } }; diff --git a/libc/test/src/math/expf16_test.cpp b/libc/test/src/math/expf16_test.cpp index ee89a9c..4ea10ea 100644 --- a/libc/test/src/math/expf16_test.cpp +++ b/libc/test/src/math/expf16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/expf16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcExpf16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +34,7 @@ TEST_F(LlvmLibcExpf16Test, PositiveRange) { for (uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); 
EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp, x, - LIBC_NAMESPACE::expf16(x), 0.5); + LIBC_NAMESPACE::expf16(x), TOLERANCE + 0.5); } } @@ -35,6 +42,6 @@ TEST_F(LlvmLibcExpf16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp, x, - LIBC_NAMESPACE::expf16(x), 0.5); + LIBC_NAMESPACE::expf16(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/expf_test.cpp b/libc/test/src/math/expf_test.cpp index 1695a60..05167b8 100644 --- a/libc/test/src/math/expf_test.cpp +++ b/libc/test/src/math/expf_test.cpp @@ -7,14 +7,20 @@ //===----------------------------------------------------------------------===// #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" #include "src/__support/libc_errno.h" +#include "src/__support/macros/optimization.h" #include "src/math/expf.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcExpfTest = LIBC_NAMESPACE::testing::FPTest<float>; @@ -94,7 +100,7 @@ TEST_F(LlvmLibcExpfTest, Borderline) { x = FPBits(0xc236bd8cU).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Exp, x, - LIBC_NAMESPACE::expf(x), 0.5); + LIBC_NAMESPACE::expf(x), TOLERANCE + 0.5); EXPECT_MATH_ERRNO(0); } diff --git a/libc/test/src/math/expm1_test.cpp b/libc/test/src/math/expm1_test.cpp index c185be9..8ca9b77 100644 --- a/libc/test/src/math/expm1_test.cpp +++ b/libc/test/src/math/expm1_test.cpp @@ -7,13 +7,19 @@ //===----------------------------------------------------------------------===// #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/expm1.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcExpm1Test = LIBC_NAMESPACE::testing::FPTest<double>; @@ -36,9 +42,9 @@ TEST_F(LlvmLibcExpm1Test, TrickyInputs) { for (int i = 0; i < N; ++i) { double x = INPUTS[i]; EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Expm1, x, - LIBC_NAMESPACE::expm1(x), 0.5); + LIBC_NAMESPACE::expm1(x), TOLERANCE + 0.5); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Expm1, -x, - LIBC_NAMESPACE::expm1(-x), 0.5); + LIBC_NAMESPACE::expm1(-x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/expm1f16_test.cpp b/libc/test/src/math/expm1f16_test.cpp index a6a6fcf..952a879 100644 --- a/libc/test/src/math/expm1f16_test.cpp +++ b/libc/test/src/math/expm1f16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/expm1f16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcExpm1f16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +34,8 @@ TEST_F(LlvmLibcExpm1f16Test, PositiveRange) { for 
(uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Expm1, x, - LIBC_NAMESPACE::expm1f16(x), 0.5); + LIBC_NAMESPACE::expm1f16(x), + TOLERANCE + 0.5); } } @@ -35,6 +43,7 @@ TEST_F(LlvmLibcExpm1f16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Expm1, x, - LIBC_NAMESPACE::expm1f16(x), 0.5); + LIBC_NAMESPACE::expm1f16(x), + TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/expm1f_test.cpp b/libc/test/src/math/expm1f_test.cpp index 28d7106..2f7a9f2 100644 --- a/libc/test/src/math/expm1f_test.cpp +++ b/libc/test/src/math/expm1f_test.cpp @@ -7,14 +7,20 @@ //===----------------------------------------------------------------------===// #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" #include "src/__support/libc_errno.h" +#include "src/__support/macros/optimization.h" #include "src/math/expm1f.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcExpm1fTest = LIBC_NAMESPACE::testing::FPTest<float>; @@ -93,7 +99,7 @@ TEST_F(LlvmLibcExpm1fTest, Borderline) { x = FPBits(0x3e35bec5U).get_val(); ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Expm1, x, - LIBC_NAMESPACE::expm1f(x), 0.5); + LIBC_NAMESPACE::expm1f(x), TOLERANCE + 0.5); EXPECT_MATH_ERRNO(0); x = FPBits(0x942ed494U).get_val(); @@ -103,7 +109,7 @@ TEST_F(LlvmLibcExpm1fTest, Borderline) { x = FPBits(0xbdc1c6cbU).get_val(); ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Expm1, x, - LIBC_NAMESPACE::expm1f(x), 0.5); + LIBC_NAMESPACE::expm1f(x), TOLERANCE + 0.5); EXPECT_MATH_ERRNO(0); } diff --git a/libc/test/src/math/log10_test.cpp b/libc/test/src/math/log10_test.cpp index 62a19d02..3e48f74 100644 --- a/libc/test/src/math/log10_test.cpp +++ b/libc/test/src/math/log10_test.cpp @@ -7,13 +7,19 @@ //===----------------------------------------------------------------------===// #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/log10.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcLog10Test = LIBC_NAMESPACE::testing::FPTest<double>; @@ -66,7 +72,7 @@ TEST_F(LlvmLibcLog10Test, TrickyInputs) { for (int i = 0; i < N; ++i) { double x = FPBits(INPUTS[i]).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Log10, x, - LIBC_NAMESPACE::log10(x), 0.5); + LIBC_NAMESPACE::log10(x), TOLERANCE + 0.5); } } @@ -118,10 +124,10 @@ TEST_F(LlvmLibcLog10Test, InDoubleRange) { } } } - tlog << " Log10 failed: " << fails << "/" << count << "/" << cc - << " tests.\n"; - tlog << " Max ULPs is at most: " << static_cast<uint64_t>(tol) << ".\n"; if (fails) { + tlog << " Log10 failed: " << fails << "/" << count << "/" << cc + << " tests.\n"; + tlog << " Max ULPs is at most: " << static_cast<uint64_t>(tol) << ".\n"; EXPECT_MPFR_MATCH(mpfr::Operation::Log10, mx, mr, 0.5, rounding_mode); } }; diff --git a/libc/test/src/math/log10f16_test.cpp 
b/libc/test/src/math/log10f16_test.cpp index a71e330..0e456dc 100644 --- a/libc/test/src/math/log10f16_test.cpp +++ b/libc/test/src/math/log10f16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/log10f16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcLog10f16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +34,8 @@ TEST_F(LlvmLibcLog10f16Test, PositiveRange) { for (uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Log10, x, - LIBC_NAMESPACE::log10f16(x), 0.5); + LIBC_NAMESPACE::log10f16(x), + TOLERANCE + 0.5); } } @@ -35,6 +43,7 @@ TEST_F(LlvmLibcLog10f16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Log10, x, - LIBC_NAMESPACE::log10f16(x), 0.5); + LIBC_NAMESPACE::log10f16(x), + TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/log10f_test.cpp b/libc/test/src/math/log10f_test.cpp index c554948..1a1bd14 100644 --- a/libc/test/src/math/log10f_test.cpp +++ b/libc/test/src/math/log10f_test.cpp @@ -7,13 +7,19 @@ //===----------------------------------------------------------------------===// #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/log10f.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcLog10fTest = LIBC_NAMESPACE::testing::FPTest<float>; @@ -60,7 +66,7 @@ TEST_F(LlvmLibcLog10fTest, TrickyInputs) { for (int i = 0; i < N; ++i) { float x = FPBits(INPUTS[i]).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Log10, x, - LIBC_NAMESPACE::log10f(x), 0.5); + LIBC_NAMESPACE::log10f(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/log1p_test.cpp b/libc/test/src/math/log1p_test.cpp index be83ce8..dd72073 100644 --- a/libc/test/src/math/log1p_test.cpp +++ b/libc/test/src/math/log1p_test.cpp @@ -7,13 +7,19 @@ //===----------------------------------------------------------------------===// #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/log1p.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcLog1pTest = LIBC_NAMESPACE::testing::FPTest<double>; @@ -68,7 +74,7 @@ TEST_F(LlvmLibcLog1pTest, TrickyInputs) { for (int i = 0; i < N; ++i) { double x = FPBits(INPUTS[i]).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Log1p, x, - LIBC_NAMESPACE::log1p(x), 0.5); + LIBC_NAMESPACE::log1p(x), TOLERANCE + 0.5); } } @@ -107,9 +113,8 @@ TEST_F(LlvmLibcLog1pTest, InDoubleRange) { continue; ++count; - 
// ASSERT_MPFR_MATCH(mpfr::Operation::Log1p, x, result, 0.5); if (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Log1p, x, result, - 0.5, rounding_mode)) { + TOLERANCE + 0.5, rounding_mode)) { ++fails; while (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Log1p, x, result, tol, rounding_mode)) { @@ -119,10 +124,10 @@ TEST_F(LlvmLibcLog1pTest, InDoubleRange) { } } } - tlog << " Log1p failed: " << fails << "/" << count << "/" << cc - << " tests.\n"; - tlog << " Max ULPs is at most: " << static_cast<uint64_t>(tol) << ".\n"; if (fails) { + tlog << " Log1p failed: " << fails << "/" << count << "/" << cc + << " tests.\n"; + tlog << " Max ULPs is at most: " << static_cast<uint64_t>(tol) << ".\n"; EXPECT_MPFR_MATCH(mpfr::Operation::Log1p, mx, mr, 0.5, rounding_mode); } }; diff --git a/libc/test/src/math/log1pf_test.cpp b/libc/test/src/math/log1pf_test.cpp index 2df4526..ef61f5a 100644 --- a/libc/test/src/math/log1pf_test.cpp +++ b/libc/test/src/math/log1pf_test.cpp @@ -7,13 +7,19 @@ //===----------------------------------------------------------------------===// #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/log1pf.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcLog1pfTest = LIBC_NAMESPACE::testing::FPTest<float>; @@ -63,7 +69,7 @@ TEST_F(LlvmLibcLog1pfTest, TrickyInputs) { for (int i = 0; i < N; ++i) { float x = FPBits(INPUTS[i]).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Log1p, x, - LIBC_NAMESPACE::log1pf(x), 0.5); + LIBC_NAMESPACE::log1pf(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/log2_test.cpp b/libc/test/src/math/log2_test.cpp index 824a8a5..9eada85 100644 --- a/libc/test/src/math/log2_test.cpp +++ b/libc/test/src/math/log2_test.cpp @@ -117,10 +117,10 @@ TEST_F(LlvmLibcLog2Test, InDoubleRange) { } } } - tlog << " Log2 failed: " << fails << "/" << count << "/" << cc - << " tests.\n"; - tlog << " Max ULPs is at most: " << static_cast<uint64_t>(tol) << ".\n"; if (fails) { + tlog << " Log2 failed: " << fails << "/" << count << "/" << cc + << " tests.\n"; + tlog << " Max ULPs is at most: " << static_cast<uint64_t>(tol) << ".\n"; EXPECT_MPFR_MATCH(mpfr::Operation::Log2, mx, mr, 0.5, rounding_mode); } }; diff --git a/libc/test/src/math/log2f16_test.cpp b/libc/test/src/math/log2f16_test.cpp index 6630ca8..d277b12 100644 --- a/libc/test/src/math/log2f16_test.cpp +++ b/libc/test/src/math/log2f16_test.cpp @@ -6,11 +6,17 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/log2f16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcLog2f16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +33,7 @@ TEST_F(LlvmLibcLog2f16Test, PositiveRange) { for (uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Log2, x, - LIBC_NAMESPACE::log2f16(x), 0.5); + LIBC_NAMESPACE::log2f16(x), 
TOLERANCE + 0.5); } } @@ -35,6 +41,6 @@ TEST_F(LlvmLibcLog2f16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Log2, x, - LIBC_NAMESPACE::log2f16(x), 0.5); + LIBC_NAMESPACE::log2f16(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/log_test.cpp b/libc/test/src/math/log_test.cpp index a68d1687..c9a030a 100644 --- a/libc/test/src/math/log_test.cpp +++ b/libc/test/src/math/log_test.cpp @@ -7,13 +7,19 @@ //===----------------------------------------------------------------------===// #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/log.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcLogTest = LIBC_NAMESPACE::testing::FPTest<double>; @@ -106,7 +112,7 @@ TEST_F(LlvmLibcLogTest, InDoubleRange) { ++count; // ASSERT_MPFR_MATCH(mpfr::Operation::Log, x, result, 0.5); if (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Log, x, result, - 0.5, rounding_mode)) { + TOLERANCE + 0.5, rounding_mode)) { ++fails; while (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Log, x, result, tol, rounding_mode)) { @@ -116,10 +122,10 @@ TEST_F(LlvmLibcLogTest, InDoubleRange) { } } } - tlog << " Log failed: " << fails << "/" << count << "/" << cc - << " tests.\n"; - tlog << " Max ULPs is at most: " << static_cast<uint64_t>(tol) << ".\n"; if (fails) { + tlog << " Log failed: " << fails << "/" << count << "/" << cc + << " tests.\n"; + tlog << " Max ULPs is at most: " << static_cast<uint64_t>(tol) << ".\n"; EXPECT_MPFR_MATCH(mpfr::Operation::Log, mx, mr, 0.5, rounding_mode); } }; diff --git a/libc/test/src/math/logf16_test.cpp b/libc/test/src/math/logf16_test.cpp index 922918b..70d6304 100644 --- a/libc/test/src/math/logf16_test.cpp +++ b/libc/test/src/math/logf16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/logf16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcLogf16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +34,7 @@ TEST_F(LlvmLibcLogf16Test, PositiveRange) { for (uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Log, x, - LIBC_NAMESPACE::logf16(x), 0.5); + LIBC_NAMESPACE::logf16(x), TOLERANCE + 0.5); } } @@ -35,6 +42,6 @@ TEST_F(LlvmLibcLogf16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Log, x, - LIBC_NAMESPACE::logf16(x), 0.5); + LIBC_NAMESPACE::logf16(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/logf_test.cpp b/libc/test/src/math/logf_test.cpp index 7658075..fb99080 100644 --- a/libc/test/src/math/logf_test.cpp +++ b/libc/test/src/math/logf_test.cpp @@ -7,13 +7,19 @@ 
//===----------------------------------------------------------------------===// #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/logf.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcLogfTest = LIBC_NAMESPACE::testing::FPTest<float>; @@ -28,7 +34,11 @@ TEST_F(LlvmLibcLogfTest, SpecialNumbers) { EXPECT_FP_EQ_WITH_EXCEPTION(neg_inf, LIBC_NAMESPACE::logf(-0.0f), FE_DIVBYZERO); EXPECT_FP_IS_NAN_WITH_EXCEPTION(LIBC_NAMESPACE::logf(-1.0f), FE_INVALID); +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS + EXPECT_TRUE(0.0f == LIBC_NAMESPACE::logf(1.0f)); +#else EXPECT_FP_EQ_ALL_ROUNDING(zero, LIBC_NAMESPACE::logf(1.0f)); +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS } TEST_F(LlvmLibcLogfTest, TrickyInputs) { @@ -73,7 +83,7 @@ TEST_F(LlvmLibcLogfTest, TrickyInputs) { for (int i = 0; i < N; ++i) { float x = FPBits(INPUTS[i]).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Log, x, - LIBC_NAMESPACE::logf(x), 0.5); + LIBC_NAMESPACE::logf(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/powf_test.cpp b/libc/test/src/math/powf_test.cpp index 1d70724..fe8ef4f 100644 --- a/libc/test/src/math/powf_test.cpp +++ b/libc/test/src/math/powf_test.cpp @@ -7,13 +7,19 @@ //===----------------------------------------------------------------------===// #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/powf.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcPowfTest = LIBC_NAMESPACE::testing::FPTest<float>; using LIBC_NAMESPACE::testing::tlog; @@ -42,7 +48,7 @@ TEST_F(LlvmLibcPowfTest, TrickyInputs) { float x = INPUTS[i].x; float y = INPUTS[i].y; EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Pow, INPUTS[i], - LIBC_NAMESPACE::powf(x, y), 0.5); + LIBC_NAMESPACE::powf(x, y), TOLERANCE + 0.5); } } @@ -88,7 +94,8 @@ TEST_F(LlvmLibcPowfTest, InFloatRange) { mpfr::BinaryInput<float> inputs{x, y}; if (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Pow, inputs, - result, 0.5, rounding_mode)) { + result, TOLERANCE + 0.5, + rounding_mode)) { ++fails; while (!TEST_MPFR_MATCH_ROUNDING_SILENTLY( mpfr::Operation::Pow, inputs, result, tol, rounding_mode)) { diff --git a/libc/test/src/math/sin_test.cpp b/libc/test/src/math/sin_test.cpp index 4d5d9dd..7eac034 100644 --- a/libc/test/src/math/sin_test.cpp +++ b/libc/test/src/math/sin_test.cpp @@ -7,11 +7,18 @@ //===----------------------------------------------------------------------===// #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/sin.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcSinTest = LIBC_NAMESPACE::testing::FPTest<double>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -46,7 
+53,7 @@ TEST_F(LlvmLibcSinTest, TrickyInputs) { for (int i = 0; i < N; ++i) { double x = INPUTS[i]; ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Sin, x, - LIBC_NAMESPACE::sin(x), 0.5); + LIBC_NAMESPACE::sin(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/sincos_test.cpp b/libc/test/src/math/sincos_test.cpp index 09c8715..30bfec6 100644 --- a/libc/test/src/math/sincos_test.cpp +++ b/libc/test/src/math/sincos_test.cpp @@ -7,11 +7,18 @@ //===----------------------------------------------------------------------===// #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/sincos.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcSincosTest = LIBC_NAMESPACE::testing::FPTest<double>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -26,36 +33,36 @@ using LIBC_NAMESPACE::testing::tlog; mpfr::ForceRoundingMode __r1(mpfr::RoundingMode::Nearest); \ if (__r1.success) { \ LIBC_NAMESPACE::sincos(input, &sin_x, &cos_x); \ - ASSERT_MPFR_MATCH(mpfr::Operation::Sin, input, sin_x, 0.5, \ + ASSERT_MPFR_MATCH(mpfr::Operation::Sin, input, sin_x, TOLERANCE + 0.5, \ mpfr::RoundingMode::Nearest); \ - ASSERT_MPFR_MATCH(mpfr::Operation::Cos, input, cos_x, 0.5, \ + ASSERT_MPFR_MATCH(mpfr::Operation::Cos, input, cos_x, TOLERANCE + 0.5, \ mpfr::RoundingMode::Nearest); \ } \ \ mpfr::ForceRoundingMode __r2(mpfr::RoundingMode::Upward); \ if (__r2.success) { \ LIBC_NAMESPACE::sincos(input, &sin_x, &cos_x); \ - ASSERT_MPFR_MATCH(mpfr::Operation::Sin, input, sin_x, 0.5, \ + ASSERT_MPFR_MATCH(mpfr::Operation::Sin, input, sin_x, TOLERANCE + 0.5, \ mpfr::RoundingMode::Upward); \ - ASSERT_MPFR_MATCH(mpfr::Operation::Cos, input, cos_x, 0.5, \ + ASSERT_MPFR_MATCH(mpfr::Operation::Cos, input, cos_x, TOLERANCE + 0.5, \ mpfr::RoundingMode::Upward); \ } \ \ mpfr::ForceRoundingMode __r3(mpfr::RoundingMode::Downward); \ if (__r3.success) { \ LIBC_NAMESPACE::sincos(input, &sin_x, &cos_x); \ - ASSERT_MPFR_MATCH(mpfr::Operation::Sin, input, sin_x, 0.5, \ + ASSERT_MPFR_MATCH(mpfr::Operation::Sin, input, sin_x, TOLERANCE + 0.5, \ mpfr::RoundingMode::Downward); \ - ASSERT_MPFR_MATCH(mpfr::Operation::Cos, input, cos_x, 0.5, \ + ASSERT_MPFR_MATCH(mpfr::Operation::Cos, input, cos_x, TOLERANCE + 0.5, \ mpfr::RoundingMode::Downward); \ } \ \ mpfr::ForceRoundingMode __r4(mpfr::RoundingMode::TowardZero); \ if (__r4.success) { \ LIBC_NAMESPACE::sincos(input, &sin_x, &cos_x); \ - ASSERT_MPFR_MATCH(mpfr::Operation::Sin, input, sin_x, 0.5, \ + ASSERT_MPFR_MATCH(mpfr::Operation::Sin, input, sin_x, TOLERANCE + 0.5, \ mpfr::RoundingMode::TowardZero); \ - ASSERT_MPFR_MATCH(mpfr::Operation::Cos, input, cos_x, 0.5, \ + ASSERT_MPFR_MATCH(mpfr::Operation::Cos, input, cos_x, TOLERANCE + 0.5, \ mpfr::RoundingMode::TowardZero); \ } \ } while (0) diff --git a/libc/test/src/math/sincosf_test.cpp b/libc/test/src/math/sincosf_test.cpp index 87e995d..9e6263b 100644 --- a/libc/test/src/math/sincosf_test.cpp +++ b/libc/test/src/math/sincosf_test.cpp @@ -8,14 +8,20 @@ #include "hdr/errno_macros.h" #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/sincosf.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "test/src/math/sdcomp26094.h" #include 
"utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcSinCosfTest = LIBC_NAMESPACE::testing::FPTest<float>; @@ -60,36 +66,36 @@ TEST_F(LlvmLibcSinCosfTest, SpecialNumbers) { mpfr::ForceRoundingMode __r1(mpfr::RoundingMode::Nearest); \ if (__r1.success) { \ LIBC_NAMESPACE::sincosf(input, &sin, &cos); \ - EXPECT_MPFR_MATCH(mpfr::Operation::Sin, input, sin, 0.5, \ + EXPECT_MPFR_MATCH(mpfr::Operation::Sin, input, sin, TOLERANCE + 0.5, \ mpfr::RoundingMode::Nearest); \ - EXPECT_MPFR_MATCH(mpfr::Operation::Cos, input, cos, 0.5, \ + EXPECT_MPFR_MATCH(mpfr::Operation::Cos, input, cos, TOLERANCE + 0.5, \ mpfr::RoundingMode::Nearest); \ } \ \ mpfr::ForceRoundingMode __r2(mpfr::RoundingMode::Upward); \ if (__r2.success) { \ LIBC_NAMESPACE::sincosf(input, &sin, &cos); \ - EXPECT_MPFR_MATCH(mpfr::Operation::Sin, input, sin, 0.5, \ + EXPECT_MPFR_MATCH(mpfr::Operation::Sin, input, sin, TOLERANCE + 0.5, \ mpfr::RoundingMode::Upward); \ - EXPECT_MPFR_MATCH(mpfr::Operation::Cos, input, cos, 0.5, \ + EXPECT_MPFR_MATCH(mpfr::Operation::Cos, input, cos, TOLERANCE + 0.5, \ mpfr::RoundingMode::Upward); \ } \ \ mpfr::ForceRoundingMode __r3(mpfr::RoundingMode::Downward); \ if (__r3.success) { \ LIBC_NAMESPACE::sincosf(input, &sin, &cos); \ - EXPECT_MPFR_MATCH(mpfr::Operation::Sin, input, sin, 0.5, \ + EXPECT_MPFR_MATCH(mpfr::Operation::Sin, input, sin, TOLERANCE + 0.5, \ mpfr::RoundingMode::Downward); \ - EXPECT_MPFR_MATCH(mpfr::Operation::Cos, input, cos, 0.5, \ + EXPECT_MPFR_MATCH(mpfr::Operation::Cos, input, cos, TOLERANCE + 0.5, \ mpfr::RoundingMode::Downward); \ } \ \ mpfr::ForceRoundingMode __r4(mpfr::RoundingMode::TowardZero); \ if (__r4.success) { \ LIBC_NAMESPACE::sincosf(input, &sin, &cos); \ - EXPECT_MPFR_MATCH(mpfr::Operation::Sin, input, sin, 0.5, \ + EXPECT_MPFR_MATCH(mpfr::Operation::Sin, input, sin, TOLERANCE + 0.5, \ mpfr::RoundingMode::TowardZero); \ - EXPECT_MPFR_MATCH(mpfr::Operation::Cos, input, cos, 0.5, \ + EXPECT_MPFR_MATCH(mpfr::Operation::Cos, input, cos, TOLERANCE + 0.5, \ mpfr::RoundingMode::TowardZero); \ } \ } diff --git a/libc/test/src/math/sinf16_test.cpp b/libc/test/src/math/sinf16_test.cpp index b05501c..d8e9e02 100644 --- a/libc/test/src/math/sinf16_test.cpp +++ b/libc/test/src/math/sinf16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/sinf16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcSinf16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +34,7 @@ TEST_F(LlvmLibcSinf16Test, PositiveRange) { for (uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Sin, x, - LIBC_NAMESPACE::sinf16(x), 0.5); + LIBC_NAMESPACE::sinf16(x), TOLERANCE + 0.5); } } @@ -35,6 +42,6 @@ TEST_F(LlvmLibcSinf16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Sin, x, - LIBC_NAMESPACE::sinf16(x), 0.5); + LIBC_NAMESPACE::sinf16(x), TOLERANCE + 0.5); } } diff --git 
a/libc/test/src/math/sinf_test.cpp b/libc/test/src/math/sinf_test.cpp index 42de7af..516941b 100644 --- a/libc/test/src/math/sinf_test.cpp +++ b/libc/test/src/math/sinf_test.cpp @@ -8,6 +8,7 @@ #include "hdr/errno_macros.h" #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" #include "src/math/sinf.h" #include "test/UnitTest/FPMatcher.h" @@ -15,7 +16,13 @@ #include "test/src/math/sdcomp26094.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 3 +#define FP_ASSERT ASSERT_MPFR_MATCH +#else +#define TOLERANCE 0 +#define FP_ASSERT ASSERT_MPFR_MATCH_ALL_ROUNDING +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcSinfTest = LIBC_NAMESPACE::testing::FPTest<float>; @@ -30,8 +37,13 @@ TEST_F(LlvmLibcSinfTest, SpecialNumbers) { EXPECT_FP_EQ(0.0f, LIBC_NAMESPACE::sinf(0.0f)); EXPECT_MATH_ERRNO(0); +// When accuracy is not required, signed zeros might not be preserved. +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS + EXPECT_TRUE(0.0f == LIBC_NAMESPACE::sinf(-0.0f)); +#else EXPECT_FP_EQ(-0.0f, LIBC_NAMESPACE::sinf(-0.0f)); EXPECT_MATH_ERRNO(0); +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS EXPECT_FP_EQ(aNaN, LIBC_NAMESPACE::sinf(inf)); EXPECT_MATH_ERRNO(EDOM); @@ -47,8 +59,8 @@ TEST_F(LlvmLibcSinfTest, InFloatRange) { float x = FPBits(v).get_val(); if (FPBits(v).is_nan() || FPBits(v).is_inf()) continue; - ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Sin, x, - LIBC_NAMESPACE::sinf(x), 0.5); + FP_ASSERT(mpfr::Operation::Sin, x, LIBC_NAMESPACE::sinf(x), + TOLERANCE + 0.5); } } @@ -95,22 +107,20 @@ TEST_F(LlvmLibcSinfTest, SpecificBitPatterns) { for (int i = 0; i < N; ++i) { float x = FPBits(INPUTS[i]).get_val(); - EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Sin, x, - LIBC_NAMESPACE::sinf(x), 0.5); - EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Sin, -x, - LIBC_NAMESPACE::sinf(-x), 0.5); + FP_ASSERT(mpfr::Operation::Sin, x, LIBC_NAMESPACE::sinf(x), + TOLERANCE + 0.5); + FP_ASSERT(mpfr::Operation::Sin, -x, LIBC_NAMESPACE::sinf(-x), + TOLERANCE + 0.5); } } // For small values, sin(x) is x. 
TEST_F(LlvmLibcSinfTest, SmallValues) { float x = FPBits(0x1780'0000U).get_val(); - EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Sin, x, - LIBC_NAMESPACE::sinf(x), 0.5); + FP_ASSERT(mpfr::Operation::Sin, x, LIBC_NAMESPACE::sinf(x), 0.5); x = FPBits(0x0040'0000U).get_val(); - EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Sin, x, - LIBC_NAMESPACE::sinf(x), 0.5); + FP_ASSERT(mpfr::Operation::Sin, x, LIBC_NAMESPACE::sinf(x), 0.5); } // SDCOMP-26094: check sinf in the cases for which the range reducer @@ -118,7 +128,7 @@ TEST_F(LlvmLibcSinfTest, SmallValues) { TEST_F(LlvmLibcSinfTest, SDCOMP_26094) { for (uint32_t v : SDCOMP26094_VALUES) { float x = FPBits((v)).get_val(); - EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Sin, x, - LIBC_NAMESPACE::sinf(x), 0.5); + FP_ASSERT(mpfr::Operation::Sin, x, LIBC_NAMESPACE::sinf(x), + TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/sinhf16_test.cpp b/libc/test/src/math/sinhf16_test.cpp index a16ab92..04e0730 100644 --- a/libc/test/src/math/sinhf16_test.cpp +++ b/libc/test/src/math/sinhf16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/sinhf16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcSinhf16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +34,7 @@ TEST_F(LlvmLibcSinhf16Test, PositiveRange) { for (uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Sinh, x, - LIBC_NAMESPACE::sinhf16(x), 0.5); + LIBC_NAMESPACE::sinhf16(x), TOLERANCE + 0.5); } } @@ -35,6 +42,6 @@ TEST_F(LlvmLibcSinhf16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Sinh, x, - LIBC_NAMESPACE::sinhf16(x), 0.5); + LIBC_NAMESPACE::sinhf16(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/sinhf_test.cpp b/libc/test/src/math/sinhf_test.cpp index 6f9ac2f..93a1eec 100644 --- a/libc/test/src/math/sinhf_test.cpp +++ b/libc/test/src/math/sinhf_test.cpp @@ -8,14 +8,20 @@ #include "hdr/errno_macros.h" #include "hdr/math_macros.h" +#include "hdr/stdint_proxy.h" #include "src/__support/CPP/array.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/sinhf.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" -#include "hdr/stdint_proxy.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS using LlvmLibcSinhfTest = LIBC_NAMESPACE::testing::FPTest<float>; @@ -79,9 +85,9 @@ TEST_F(LlvmLibcSinhfTest, Overflow) { TEST_F(LlvmLibcSinhfTest, ExceptionalValues) { float x = FPBits(uint32_t(0x3a12'85ffU)).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Sinh, x, - LIBC_NAMESPACE::sinhf(x), 0.5); + LIBC_NAMESPACE::sinhf(x), TOLERANCE + 0.5); x = -FPBits(uint32_t(0x3a12'85ffU)).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Sinh, x, - LIBC_NAMESPACE::sinhf(x), 0.5); + LIBC_NAMESPACE::sinhf(x), TOLERANCE + 0.5); } diff --git a/libc/test/src/math/smoke/sinf_test.cpp b/libc/test/src/math/smoke/sinf_test.cpp 
index b0ba81e..7b671c6 100644 --- a/libc/test/src/math/smoke/sinf_test.cpp +++ b/libc/test/src/math/smoke/sinf_test.cpp @@ -10,6 +10,7 @@ #include "hdr/math_macros.h" #include "hdr/stdint_proxy.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/sinf.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" @@ -26,8 +27,13 @@ TEST_F(LlvmLibcSinfTest, SpecialNumbers) { EXPECT_FP_EQ(0.0f, LIBC_NAMESPACE::sinf(0.0f)); EXPECT_MATH_ERRNO(0); +// When accuracy is not required, signed zeros might not be preserved. +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS + EXPECT_TRUE(0.0f == LIBC_NAMESPACE::sinf(-0.0f)); +#else EXPECT_FP_EQ(-0.0f, LIBC_NAMESPACE::sinf(-0.0f)); EXPECT_MATH_ERRNO(0); +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS EXPECT_FP_EQ(aNaN, LIBC_NAMESPACE::sinf(inf)); EXPECT_MATH_ERRNO(EDOM); diff --git a/libc/test/src/math/tan_test.cpp b/libc/test/src/math/tan_test.cpp index 12dfc02..62f8994 100644 --- a/libc/test/src/math/tan_test.cpp +++ b/libc/test/src/math/tan_test.cpp @@ -7,11 +7,18 @@ //===----------------------------------------------------------------------===// #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/tan.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 4 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcTanTest = LIBC_NAMESPACE::testing::FPTest<double>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -53,9 +60,9 @@ TEST_F(LlvmLibcTanTest, TrickyInputs) { for (int i = 0; i < N; ++i) { double x = INPUTS[i]; ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Tan, x, - LIBC_NAMESPACE::tan(x), 0.5); + LIBC_NAMESPACE::tan(x), TOLERANCE + 0.5); ASSERT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Tan, -x, - LIBC_NAMESPACE::tan(-x), 0.5); + LIBC_NAMESPACE::tan(-x), TOLERANCE + 0.5); } } @@ -89,7 +96,7 @@ TEST_F(LlvmLibcTanTest, InDoubleRange) { ++tested; if (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Tan, x, result, - 0.5, rounding_mode)) { + TOLERANCE + 0.5, rounding_mode)) { ++fails; while (!TEST_MPFR_MATCH_ROUNDING_SILENTLY(mpfr::Operation::Tan, x, result, ulp, rounding_mode)) { diff --git a/libc/test/src/math/tanf16_test.cpp b/libc/test/src/math/tanf16_test.cpp index f2e8741..cd4613b 100644 --- a/libc/test/src/math/tanf16_test.cpp +++ b/libc/test/src/math/tanf16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/tanf16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcTanf16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +34,7 @@ TEST_F(LlvmLibcTanf16Test, PositiveRange) { for (uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Tan, x, - LIBC_NAMESPACE::tanf16(x), 0.5); + LIBC_NAMESPACE::tanf16(x), TOLERANCE + 0.5); } } @@ -35,6 +42,6 @@ TEST_F(LlvmLibcTanf16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Tan, x, - 
LIBC_NAMESPACE::tanf16(x), 0.5); + LIBC_NAMESPACE::tanf16(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/tanf_test.cpp b/libc/test/src/math/tanf_test.cpp index 2202bd6..b384d8c 100644 --- a/libc/test/src/math/tanf_test.cpp +++ b/libc/test/src/math/tanf_test.cpp @@ -9,12 +9,19 @@ #include "hdr/errno_macros.h" #include "hdr/math_macros.h" #include "src/__support/FPUtil/FPBits.h" +#include "src/__support/macros/optimization.h" #include "src/math/tanf.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "test/src/math/sdcomp26094.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + #include "hdr/stdint_proxy.h" using LlvmLibcTanfTest = LIBC_NAMESPACE::testing::FPTest<float>; @@ -114,9 +121,9 @@ TEST_F(LlvmLibcTanfTest, SpecificBitPatterns) { for (int i = 0; i < N; ++i) { float x = FPBits(INPUTS[i]).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Tan, x, - LIBC_NAMESPACE::tanf(x), 0.5); + LIBC_NAMESPACE::tanf(x), TOLERANCE + 0.5); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Tan, -x, - LIBC_NAMESPACE::tanf(-x), 0.5); + LIBC_NAMESPACE::tanf(-x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/tanhf16_test.cpp b/libc/test/src/math/tanhf16_test.cpp index 7124a83..ff79604 100644 --- a/libc/test/src/math/tanhf16_test.cpp +++ b/libc/test/src/math/tanhf16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/tanhf16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcTanhf16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +34,7 @@ TEST_F(LlvmLibcTanhf16Test, PositiveRange) { for (uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Tanh, x, - LIBC_NAMESPACE::tanhf16(x), 0.5); + LIBC_NAMESPACE::tanhf16(x), TOLERANCE + 0.5); } } @@ -35,6 +42,6 @@ TEST_F(LlvmLibcTanhf16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Tanh, x, - LIBC_NAMESPACE::tanhf16(x), 0.5); + LIBC_NAMESPACE::tanhf16(x), TOLERANCE + 0.5); } } diff --git a/libc/test/src/math/tanpif16_test.cpp b/libc/test/src/math/tanpif16_test.cpp index 04d1f71..f994781 100644 --- a/libc/test/src/math/tanpif16_test.cpp +++ b/libc/test/src/math/tanpif16_test.cpp @@ -6,11 +6,18 @@ // //===----------------------------------------------------------------------===// +#include "src/__support/macros/optimization.h" #include "src/math/tanpif16.h" #include "test/UnitTest/FPMatcher.h" #include "test/UnitTest/Test.h" #include "utils/MPFRWrapper/MPFRUtils.h" +#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +#define TOLERANCE 1 +#else +#define TOLERANCE 0 +#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS + using LlvmLibcTanpif16Test = LIBC_NAMESPACE::testing::FPTest<float16>; namespace mpfr = LIBC_NAMESPACE::testing::mpfr; @@ -27,7 +34,8 @@ TEST_F(LlvmLibcTanpif16Test, PositiveRange) { for (uint16_t v = POS_START; v <= POS_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Tanpi, x, - 
LIBC_NAMESPACE::tanpif16(x), 0.5); + LIBC_NAMESPACE::tanpif16(x), + TOLERANCE + 0.5); } } @@ -35,6 +43,7 @@ TEST_F(LlvmLibcTanpif16Test, NegativeRange) { for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { float16 x = FPBits(v).get_val(); EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Tanpi, x, - LIBC_NAMESPACE::tanpif16(x), 0.5); + LIBC_NAMESPACE::tanpif16(x), + TOLERANCE + 0.5); } } diff --git a/libclc/cmake/modules/AddLibclc.cmake b/libclc/cmake/modules/AddLibclc.cmake index d8c2219..3228926 100644 --- a/libclc/cmake/modules/AddLibclc.cmake +++ b/libclc/cmake/modules/AddLibclc.cmake @@ -392,7 +392,7 @@ function(add_libclc_builtin_set) list( PREPEND bytecode_files ${bytecode_ir_files} ) if( NOT bytecode_files ) - message(FATAL_ERROR "Cannot create an empty builtins library") + message(FATAL_ERROR "Cannot create an empty builtins library for ${ARG_ARCH_SUFFIX}") endif() set( builtins_link_lib_tgt builtins.link.${ARG_ARCH_SUFFIX} ) diff --git a/libcxx/include/__cxx03/__algorithm/for_each.h b/libcxx/include/__cxx03/__algorithm/for_each.h index d160a9e..1ffb013 100644 --- a/libcxx/include/__cxx03/__algorithm/for_each.h +++ b/libcxx/include/__cxx03/__algorithm/for_each.h @@ -14,15 +14,11 @@ #include <__cxx03/__config> #include <__cxx03/__iterator/segmented_iterator.h> #include <__cxx03/__type_traits/enable_if.h> -#include <__cxx03/__utility/move.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header #endif -_LIBCPP_PUSH_MACROS -#include <__cxx03/__undef_macros> - _LIBCPP_BEGIN_NAMESPACE_STD template <class _InputIterator, class _Function> @@ -34,6 +30,4 @@ _LIBCPP_HIDE_FROM_ABI _Function for_each(_InputIterator __first, _InputIterator _LIBCPP_END_NAMESPACE_STD -_LIBCPP_POP_MACROS - #endif // _LIBCPP___CXX03___ALGORITHM_FOR_EACH_H diff --git a/lld/test/COFF/Inputs/undefined-symbol-lto-a.ll b/lld/test/COFF/Inputs/undefined-symbol-lto-a.ll index 7e29044..f57a3e3 100644 --- a/lld/test/COFF/Inputs/undefined-symbol-lto-a.ll +++ b/lld/test/COFF/Inputs/undefined-symbol-lto-a.ll @@ -47,8 +47,8 @@ entry: ret void } -attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { nounwind sspstrong uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } +attributes #1 = { nounwind sspstrong uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" 
"min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } attributes #2 = { nounwind } !llvm.linker.options = !{!1, !2} diff --git a/lld/test/COFF/Inputs/undefined-symbol-lto-b.ll b/lld/test/COFF/Inputs/undefined-symbol-lto-b.ll index 0f64e23..7347fde 100644 --- a/lld/test/COFF/Inputs/undefined-symbol-lto-b.ll +++ b/lld/test/COFF/Inputs/undefined-symbol-lto-b.ll @@ -11,7 +11,7 @@ entry: ret void } -attributes #0 = { norecurse nounwind readnone sspstrong uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { norecurse nounwind readnone sspstrong uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } !llvm.linker.options = !{!0, !1} !llvm.module.flags = !{!2, !3, !4, !5} diff --git a/lld/test/COFF/Inputs/undefined-symbol-multi-lto.ll b/lld/test/COFF/Inputs/undefined-symbol-multi-lto.ll index 5f67302..61828cae 100644 --- a/lld/test/COFF/Inputs/undefined-symbol-multi-lto.ll +++ b/lld/test/COFF/Inputs/undefined-symbol-multi-lto.ll @@ -13,8 +13,8 @@ declare dso_local i32 @"?foo@@YAHXZ"() #1 declare dso_local i32 @"?bar@@YAHXZ"() #1 -attributes #0 = { noinline optnone uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { noinline optnone uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } +attributes #1 = { 
"correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } !llvm.module.flags = !{!0} !llvm.ident = !{!1} diff --git a/lld/test/MachO/lto-mattrs.ll b/lld/test/MachO/lto-mattrs.ll index f658b48..4131329 100644 --- a/lld/test/MachO/lto-mattrs.ll +++ b/lld/test/MachO/lto-mattrs.ll @@ -33,4 +33,4 @@ define float @foo(float %x) #0 { ret float %div } -attributes #0 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf,vec-divf" } +attributes #0 = { "reciprocal-estimates"="divf,vec-divf" } diff --git a/lld/test/wasm/Inputs/debuginfo1.ll b/lld/test/wasm/Inputs/debuginfo1.ll index d6db880..0a1c42a 100644 --- a/lld/test/wasm/Inputs/debuginfo1.ll +++ b/lld/test/wasm/Inputs/debuginfo1.ll @@ -35,9 +35,9 @@ declare void @foo(i32) local_unnamed_addr #2 ; Function Attrs: nounwind readnone speculatable declare void @llvm.dbg.value(metadata, metadata, metadata) #3 -attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "use-soft-float"="false" } +attributes #1 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "use-soft-float"="false" } +attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "use-soft-float"="false" } attributes #3 = { nounwind readnone speculatable } attributes #4 = { nounwind 
} diff --git a/lld/test/wasm/Inputs/debuginfo2.ll b/lld/test/wasm/Inputs/debuginfo2.ll index 1b63dd5..c832be5 100644 --- a/lld/test/wasm/Inputs/debuginfo2.ll +++ b/lld/test/wasm/Inputs/debuginfo2.ll @@ -31,7 +31,7 @@ entry: ; Function Attrs: nounwind readnone speculatable declare void @llvm.dbg.value(metadata, metadata, metadata) #1 -attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "use-soft-float"="false" } attributes #1 = { nounwind readnone speculatable } !llvm.dbg.cu = !{!2} diff --git a/lld/test/wasm/debug-removed-fn.ll b/lld/test/wasm/debug-removed-fn.ll index 8dae48a..20c3034 100644 --- a/lld/test/wasm/debug-removed-fn.ll +++ b/lld/test/wasm/debug-removed-fn.ll @@ -28,7 +28,7 @@ entry: ret i32 6, !dbg !13 } -attributes #0 = { noinline nounwind optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { noinline nounwind optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "use-soft-float"="false" } !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!3, !4, !5} diff --git a/lldb/bindings/interface/SBPlatformExtensions.i b/lldb/bindings/interface/SBPlatformExtensions.i new file mode 100644 index 0000000..86759bd --- /dev/null +++ b/lldb/bindings/interface/SBPlatformExtensions.i @@ -0,0 +1,7 @@ +%extend lldb::SBPlatform { +#ifdef SWIGPYTHON + %pythoncode %{ + is_host = property(IsHost, None, doc='''A read only property that returns a boolean value that indicates if this platform is the host platform.''') + %} +#endif +} diff --git a/lldb/bindings/interfaces.swig b/lldb/bindings/interfaces.swig index e71ed13..b3d4497 100644 --- a/lldb/bindings/interfaces.swig +++ b/lldb/bindings/interfaces.swig @@ -53,6 +53,7 @@ %include "./interface/SBModuleSpecDocstrings.i" %include "./interface/SBMutexExtensions.i" %include "./interface/SBPlatformDocstrings.i" +%include "./interface/SBPlatformExtensions.i" %include "./interface/SBProcessDocstrings.i" %include "./interface/SBProcessInfoDocstrings.i" %include "./interface/SBProgressDocstrings.i" diff --git a/lldb/include/lldb/API/SBPlatform.h b/lldb/include/lldb/API/SBPlatform.h index d63d2ed..7cb19b3 100644 --- a/lldb/include/lldb/API/SBPlatform.h +++ b/lldb/include/lldb/API/SBPlatform.h @@ -112,6 +112,9 @@ public: bool IsValid() const; + /// Returns true
if this platform is the host platform, otherwise false. + bool IsHost() const; + void Clear(); const char *GetWorkingDirectory(); diff --git a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py index a3d924d..d892c01 100644 --- a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py +++ b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py @@ -11,6 +11,7 @@ import subprocess import signal import sys import threading +import warnings import time from typing import ( Any, @@ -383,6 +384,10 @@ class DebugCommunication(object): """Process received packets, updating the session state.""" with self._recv_condition: for packet in self._recv_packets: + if packet and ("seq" not in packet or packet["seq"] == 0): + warnings.warn( + f"received a malformed packet, expected 'seq != 0' for {packet!r}" + ) # Handle events that may modify any stateful properties of # the DAP session. if packet and packet["type"] == "event": @@ -576,6 +581,7 @@ class DebugCommunication(object): # If we exited, then we are done if stopped_event["event"] == "exited": break + # Otherwise we stopped and there might be one or more 'stopped' # events for each thread that stopped with a reason, so keep # checking for more 'stopped' events and return all of them diff --git a/lldb/source/API/SBPlatform.cpp b/lldb/source/API/SBPlatform.cpp index ec59e27..9a0b47c 100644 --- a/lldb/source/API/SBPlatform.cpp +++ b/lldb/source/API/SBPlatform.cpp @@ -331,6 +331,11 @@ SBPlatform::operator bool() const { return m_opaque_sp.get() != nullptr; } +bool SBPlatform::IsHost() const { + LLDB_INSTRUMENT_VA(this); + return m_opaque_sp.get() != nullptr && m_opaque_sp->IsHost(); +} + void SBPlatform::Clear() { LLDB_INSTRUMENT_VA(this); diff --git a/lldb/test/API/functionalities/thread/finish-from-empty-func/TestEmptyFuncThreadStepOut.py b/lldb/test/API/functionalities/thread/finish-from-empty-func/TestEmptyFuncThreadStepOut.py index f5d3da53..c95a57f 100644 --- a/lldb/test/API/functionalities/thread/finish-from-empty-func/TestEmptyFuncThreadStepOut.py +++ b/lldb/test/API/functionalities/thread/finish-from-empty-func/TestEmptyFuncThreadStepOut.py @@ -13,12 +13,31 @@ class FinishFromEmptyFunctionTestCase(TestBase): @skipIf(compiler="clang", compiler_version=['<', '17.0']) def test_finish_from_empty_function(self): - """Test that when stopped at a breakpoint in an empty function, finish leaves it correctly.""" + """Test that when stopped at a breakpoint located at the last instruction + of a function, finish leaves it correctly.""" self.build() - exe = self.getBuildArtifact("a.out") - target, process, thread, _ = lldbutil.run_to_name_breakpoint( - self, "done", exe_name=exe + target, _, thread, _ = lldbutil.run_to_source_breakpoint( + self, "// Set breakpoint here", lldb.SBFileSpec("main.c") ) + # Find the address of the last instruction of 'done()' and set a breakpoint there. + # Even though 'done()' is empty, it may contain prologue and epilogue code, so + # simply setting a breakpoint at the function can place it before 'ret'. + error = lldb.SBError() + ret_bp_addr = lldb.SBAddress() + while True: + thread.StepInstruction(False, error) + self.assertTrue(error.Success()) + frame = thread.GetSelectedFrame() + if "done" in frame.GetFunctionName(): + ret_bp_addr = frame.GetPCAddress() + elif ret_bp_addr.IsValid(): + # The entire function 'done()' has been stepped through, so 'ret_bp_addr' + # now contains the address of its last instruction, i.e. 'ret'. 
+ break + ret_bp = target.BreakpointCreateByAddress(ret_bp_addr.GetLoadAddress(target)) + self.assertTrue(ret_bp.IsValid()) + # Resume the execution and hit the new breakpoint. + self.runCmd("cont") if self.TraceOn(): self.runCmd("bt") @@ -29,7 +48,6 @@ class FinishFromEmptyFunctionTestCase(TestBase): ) self.assertTrue(safety_bp.IsValid()) - error = lldb.SBError() thread.StepOut(error) self.assertTrue(error.Success()) diff --git a/lldb/test/API/functionalities/thread/finish-from-empty-func/main.c b/lldb/test/API/functionalities/thread/finish-from-empty-func/main.c index bc66a548..b3f90db5 100644 --- a/lldb/test/API/functionalities/thread/finish-from-empty-func/main.c +++ b/lldb/test/API/functionalities/thread/finish-from-empty-func/main.c @@ -2,6 +2,7 @@ void done() {} int main() { puts("in main"); + done(); // Set breakpoint here done(); puts("leaving main"); return 0; diff --git a/lldb/tools/lldb-dap/DAP.cpp b/lldb/tools/lldb-dap/DAP.cpp index 61226cc..52c8c6b 100644 --- a/lldb/tools/lldb-dap/DAP.cpp +++ b/lldb/tools/lldb-dap/DAP.cpp @@ -266,40 +266,49 @@ void DAP::SendJSON(const llvm::json::Value &json) { Send(message); } -void DAP::Send(const Message &message) { - if (const protocol::Event *event = std::get_if<protocol::Event>(&message)) { +Id DAP::Send(const Message &message) { + std::lock_guard<std::mutex> guard(call_mutex); + Message msg = std::visit( + [this](auto &&msg) -> Message { + if (msg.seq == kCalculateSeq) + msg.seq = seq++; + return msg; + }, + Message(message)); + + if (const protocol::Event *event = std::get_if<protocol::Event>(&msg)) { if (llvm::Error err = transport.Send(*event)) DAP_LOG_ERROR(log, std::move(err), "({0}) sending event failed", m_client_name); - return; + return event->seq; } - if (const Request *req = std::get_if<Request>(&message)) { + if (const Request *req = std::get_if<Request>(&msg)) { if (llvm::Error err = transport.Send(*req)) DAP_LOG_ERROR(log, std::move(err), "({0}) sending request failed", m_client_name); - return; + return req->seq; } - if (const Response *resp = std::get_if<Response>(&message)) { + if (const Response *resp = std::get_if<Response>(&msg)) { // FIXME: After all the requests have migrated from LegacyRequestHandler > // RequestHandler<> this should be handled in RequestHandler<>::operator(). // If the debugger was interrupted, convert this response into a // 'cancelled' response because we might have a partial result. - llvm::Error err = - (debugger.InterruptRequested()) - ? transport.Send({/*request_seq=*/resp->request_seq, - /*command=*/resp->command, - /*success=*/false, - /*message=*/eResponseMessageCancelled, - /*body=*/std::nullopt}) - : transport.Send(*resp); - if (err) { + llvm::Error err = (debugger.InterruptRequested()) + ? transport.Send({ + /*request_seq=*/resp->request_seq, + /*command=*/resp->command, + /*success=*/false, + /*message=*/eResponseMessageCancelled, + /*body=*/std::nullopt, + /*seq=*/resp->seq, + }) + : transport.Send(*resp); + if (err) DAP_LOG_ERROR(log, std::move(err), "({0}) sending response failed", m_client_name); - return; - } - return; + return resp->seq; } llvm_unreachable("Unexpected message type"); diff --git a/lldb/tools/lldb-dap/DAP.h b/lldb/tools/lldb-dap/DAP.h index bf2c3f1..b4f111e 100644 --- a/lldb/tools/lldb-dap/DAP.h +++ b/lldb/tools/lldb-dap/DAP.h @@ -136,7 +136,7 @@ struct DAP final : public DAPTransport::MessageHandler { /// unless we send a "thread" event to indicate the thread exited. 
llvm::DenseSet<lldb::tid_t> thread_ids; - uint32_t reverse_request_seq = 0; + protocol::Id seq = 0; std::mutex call_mutex; llvm::SmallDenseMap<int64_t, std::unique_ptr<ResponseHandler>> inflight_reverse_requests; @@ -220,8 +220,8 @@ struct DAP final : public DAPTransport::MessageHandler { /// Serialize the JSON value into a string and send the JSON packet to the /// "out" stream. void SendJSON(const llvm::json::Value &json); - /// Send the given message to the client - void Send(const protocol::Message &message); + /// Send the given message to the client. + protocol::Id Send(const protocol::Message &message); void SendOutput(OutputType o, const llvm::StringRef output); @@ -353,19 +353,13 @@ struct DAP final : public DAPTransport::MessageHandler { template <typename Handler> void SendReverseRequest(llvm::StringRef command, llvm::json::Value arguments) { - int64_t id; - { - std::lock_guard<std::mutex> locker(call_mutex); - id = ++reverse_request_seq; - inflight_reverse_requests[id] = std::make_unique<Handler>(command, id); - } - - SendJSON(llvm::json::Object{ - {"type", "request"}, - {"seq", id}, - {"command", command}, - {"arguments", std::move(arguments)}, + protocol::Id id = Send(protocol::Request{ + command.str(), + std::move(arguments), }); + + std::lock_guard<std::mutex> locker(call_mutex); + inflight_reverse_requests[id] = std::make_unique<Handler>(command, id); } /// The set of capabilities supported by this adapter. diff --git a/lldb/tools/lldb-dap/EventHelper.cpp b/lldb/tools/lldb-dap/EventHelper.cpp index 3042d32..c5d5f2b 100644 --- a/lldb/tools/lldb-dap/EventHelper.cpp +++ b/lldb/tools/lldb-dap/EventHelper.cpp @@ -15,6 +15,7 @@ #include "Protocol/ProtocolRequests.h" #include "Protocol/ProtocolTypes.h" #include "lldb/API/SBFileSpec.h" +#include "lldb/API/SBPlatform.h" #include "llvm/Support/Error.h" #include <utility> @@ -78,11 +79,9 @@ void SendExtraCapabilities(DAP &dap) { // { "$ref": "#/definitions/Event" }, // { // "type": "object", -// "description": "Event message for 'process' event type. The event -// indicates that the debugger has begun debugging a -// new process. Either one that it has launched, or one -// that it has attached to.", -// "properties": { +// "description": "The event indicates that the debugger has begun +// debugging a new process. Either one that it has launched, or one that +// it has attached to.", "properties": { // "event": { // "type": "string", // "enum": [ "process" ] @@ -93,31 +92,37 @@ void SendExtraCapabilities(DAP &dap) { // "name": { // "type": "string", // "description": "The logical name of the process. This is -// usually the full path to process's executable -// file. Example: /home/myproj/program.js." +// usually the full path to process's executable file. Example: +// /home/example/myproj/program.js." // }, // "systemProcessId": { // "type": "integer", -// "description": "The system process id of the debugged process. -// This property will be missing for non-system -// processes." +// "description": "The process ID of the debugged process, as +// assigned by the operating system. This property should be +// omitted for logical processes that do not map to operating +// system processes on the machine." // }, // "isLocalProcess": { // "type": "boolean", // "description": "If true, the process is running on the same -// computer as the debug adapter." +// computer as the debug adapter." 
// }, // "startMethod": { // "type": "string", // "enum": [ "launch", "attach", "attachForSuspendedLaunch" ], // "description": "Describes how the debug engine started -// debugging this process.", -// "enumDescriptions": [ +// debugging this process.", "enumDescriptions": [ // "Process was launched under the debugger.", // "Debugger attached to an existing process.", -// "A project launcher component has launched a new process in -// a suspended state and then asked the debugger to attach." +// "A project launcher component has launched a new process in a +// suspended state and then asked the debugger to attach." // ] +// }, +// "pointerSize": { +// "type": "integer", +// "description": "The size of a pointer or address for this +// process, in bits. This value may be used by clients when +// formatting addresses for display." // } // }, // "required": [ "name" ] @@ -126,7 +131,7 @@ void SendExtraCapabilities(DAP &dap) { // "required": [ "event", "body" ] // } // ] -// } +// }, void SendProcessEvent(DAP &dap, LaunchMethod launch_method) { lldb::SBFileSpec exe_fspec = dap.target.GetExecutable(); char exe_path[PATH_MAX]; @@ -136,7 +141,8 @@ void SendProcessEvent(DAP &dap, LaunchMethod launch_method) { EmplaceSafeString(body, "name", exe_path); const auto pid = dap.target.GetProcess().GetProcessID(); body.try_emplace("systemProcessId", (int64_t)pid); - body.try_emplace("isLocalProcess", true); + body.try_emplace("isLocalProcess", dap.target.GetPlatform().IsHost()); + body.try_emplace("pointerSize", dap.target.GetAddressByteSize() * 8); const char *startMethod = nullptr; switch (launch_method) { case Launch: diff --git a/lldb/tools/lldb-dap/Handler/ExceptionInfoRequestHandler.cpp b/lldb/tools/lldb-dap/Handler/ExceptionInfoRequestHandler.cpp index c1c2adb..fa01a20 100644 --- a/lldb/tools/lldb-dap/Handler/ExceptionInfoRequestHandler.cpp +++ b/lldb/tools/lldb-dap/Handler/ExceptionInfoRequestHandler.cpp @@ -7,168 +7,77 @@ //===----------------------------------------------------------------------===// #include "DAP.h" -#include "EventHelper.h" -#include "JSONUtils.h" +#include "DAPError.h" +#include "Protocol/ProtocolRequests.h" +#include "Protocol/ProtocolTypes.h" #include "RequestHandler.h" #include "lldb/API/SBStream.h" +using namespace lldb_dap::protocol; + namespace lldb_dap { -// "ExceptionInfoRequest": { -// "allOf": [ { "$ref": "#/definitions/Request" }, { -// "type": "object", -// "description": "Retrieves the details of the exception that -// caused this event to be raised. Clients should only call this request if -// the corresponding capability `supportsExceptionInfoRequest` is true.", -// "properties": { -// "command": { -// "type": "string", -// "enum": [ "exceptionInfo" ] -// }, -// "arguments": { -// "$ref": "#/definitions/ExceptionInfoArguments" -// } -// }, -// "required": [ "command", "arguments" ] -// }] -// }, -// "ExceptionInfoArguments": { -// "type": "object", -// "description": "Arguments for `exceptionInfo` request.", -// "properties": { -// "threadId": { -// "type": "integer", -// "description": "Thread for which exception information should be -// retrieved." -// } -// }, -// "required": [ "threadId" ] -// }, -// "ExceptionInfoResponse": { -// "allOf": [ { "$ref": "#/definitions/Response" }, { -// "type": "object", -// "description": "Response to `exceptionInfo` request.", -// "properties": { -// "body": { -// "type": "object", -// "properties": { -// "exceptionId": { -// "type": "string", -// "description": "ID of the exception that was thrown." 
-// }, -// "description": { -// "type": "string", -// "description": "Descriptive text for the exception." -// }, -// "breakMode": { -// "$ref": "#/definitions/ExceptionBreakMode", -// "description": "Mode that caused the exception notification to -// be raised." -// }, -// "details": { -// "$ref": "#/definitions/ExceptionDetails", -// "description": "Detailed information about the exception." -// } -// }, -// "required": [ "exceptionId", "breakMode" ] -// } -// }, -// "required": [ "body" ] -// }] -// } -// "ExceptionDetails": { -// "type": "object", -// "description": "Detailed information about an exception that has -// occurred.", "properties": { -// "message": { -// "type": "string", -// "description": "Message contained in the exception." -// }, -// "typeName": { -// "type": "string", -// "description": "Short type name of the exception object." -// }, -// "fullTypeName": { -// "type": "string", -// "description": "Fully-qualified type name of the exception object." -// }, -// "evaluateName": { -// "type": "string", -// "description": "An expression that can be evaluated in the current -// scope to obtain the exception object." -// }, -// "stackTrace": { -// "type": "string", -// "description": "Stack trace at the time the exception was thrown." -// }, -// "innerException": { -// "type": "array", -// "items": { -// "$ref": "#/definitions/ExceptionDetails" -// }, -// "description": "Details of the exception contained by this exception, -// if any." -// } -// } -// }, -void ExceptionInfoRequestHandler::operator()( - const llvm::json::Object &request) const { - llvm::json::Object response; - FillResponse(request, response); - const auto *arguments = request.getObject("arguments"); - llvm::json::Object body; - lldb::SBThread thread = dap.GetLLDBThread(*arguments); - if (thread.IsValid()) { - auto stopReason = thread.GetStopReason(); - if (stopReason == lldb::eStopReasonSignal) - body.try_emplace("exceptionId", "signal"); - else if (stopReason == lldb::eStopReasonBreakpoint) { - ExceptionBreakpoint *exc_bp = dap.GetExceptionBPFromStopReason(thread); - if (exc_bp) { - EmplaceSafeString(body, "exceptionId", exc_bp->GetFilter()); - EmplaceSafeString(body, "description", exc_bp->GetLabel()); - } else { - body.try_emplace("exceptionId", "exception"); - } +/// Retrieves the details of the exception that caused this event to be raised. +/// +/// Clients should only call this request if the corresponding capability +/// `supportsExceptionInfoRequest` is true. 
+llvm::Expected<ExceptionInfoResponseBody> +ExceptionInfoRequestHandler::Run(const ExceptionInfoArguments &args) const { + + lldb::SBThread thread = dap.GetLLDBThread(args.threadId); + if (!thread.IsValid()) + return llvm::make_error<DAPError>( + llvm::formatv("Invalid thread id: {}", args.threadId).str()); + + ExceptionInfoResponseBody response; + response.breakMode = eExceptionBreakModeAlways; + const lldb::StopReason stop_reason = thread.GetStopReason(); + switch (stop_reason) { + case lldb::eStopReasonSignal: + response.exceptionId = "signal"; + break; + case lldb::eStopReasonBreakpoint: { + const ExceptionBreakpoint *exc_bp = + dap.GetExceptionBPFromStopReason(thread); + if (exc_bp) { + response.exceptionId = exc_bp->GetFilter(); + response.description = exc_bp->GetLabel(); } else { - body.try_emplace("exceptionId", "exception"); + response.exceptionId = "exception"; } - if (!ObjectContainsKey(body, "description")) { - char description[1024]; - if (thread.GetStopDescription(description, sizeof(description))) { - EmplaceSafeString(body, "description", description); - } + } break; + default: + response.exceptionId = "exception"; + } + + if (response.description.empty()) { + const size_t buffer_size = thread.GetStopDescription(nullptr, 0); + if (buffer_size > 0) { + std::string &buffer = response.description; + buffer.resize(buffer_size); + thread.GetStopDescription(buffer.data(), buffer.size()); } - body.try_emplace("breakMode", "always"); - auto exception = thread.GetCurrentException(); - if (exception.IsValid()) { - llvm::json::Object details; - lldb::SBStream stream; - if (exception.GetDescription(stream)) { - EmplaceSafeString(details, "message", stream.GetData()); - } + } - auto exceptionBacktrace = thread.GetCurrentExceptionBacktrace(); - if (exceptionBacktrace.IsValid()) { - lldb::SBStream stream; - exceptionBacktrace.GetDescription(stream); - for (uint32_t i = 0; i < exceptionBacktrace.GetNumFrames(); i++) { - lldb::SBFrame frame = exceptionBacktrace.GetFrameAtIndex(i); - frame.GetDescription(stream); - } - EmplaceSafeString(details, "stackTrace", stream.GetData()); - } + if (lldb::SBValue exception = thread.GetCurrentException()) { + lldb::SBStream stream; + response.details = ExceptionDetails{}; + if (exception.GetDescription(stream)) { + response.details->message = stream.GetData(); + } + + if (lldb::SBThread exception_backtrace = + thread.GetCurrentExceptionBacktrace()) { + stream.Clear(); + exception_backtrace.GetDescription(stream); - body.try_emplace("details", std::move(details)); + for (uint32_t idx = 0; idx < exception_backtrace.GetNumFrames(); idx++) { + lldb::SBFrame frame = exception_backtrace.GetFrameAtIndex(idx); + frame.GetDescription(stream); + } + response.details->stackTrace = stream.GetData(); } - // auto excInfoCount = thread.GetStopReasonDataCount(); - // for (auto i=0; i<excInfoCount; ++i) { - // uint64_t exc_data = thread.GetStopReasonDataAtIndex(i); - // } - } else { - response["success"] = llvm::json::Value(false); } - response.try_emplace("body", std::move(body)); - dap.SendJSON(llvm::json::Value(std::move(response))); + return response; } } // namespace lldb_dap diff --git a/lldb/tools/lldb-dap/Handler/RequestHandler.cpp b/lldb/tools/lldb-dap/Handler/RequestHandler.cpp index de63c9d..d67437a 100644 --- a/lldb/tools/lldb-dap/Handler/RequestHandler.cpp +++ b/lldb/tools/lldb-dap/Handler/RequestHandler.cpp @@ -157,11 +157,12 @@ RunInTerminal(DAP &dap, const protocol::LaunchRequestArguments &arguments) { void BaseRequestHandler::Run(const Request &request) 
{ // If this request was cancelled, send a cancelled response. if (dap.IsCancelled(request)) { - Response cancelled{/*request_seq=*/request.seq, - /*command=*/request.command, - /*success=*/false, - /*message=*/eResponseMessageCancelled, - /*body=*/std::nullopt}; + Response cancelled{ + /*request_seq=*/request.seq, + /*command=*/request.command, + /*success=*/false, + /*message=*/eResponseMessageCancelled, + }; dap.Send(cancelled); return; } diff --git a/lldb/tools/lldb-dap/Handler/RequestHandler.h b/lldb/tools/lldb-dap/Handler/RequestHandler.h index 977a247..7a7e126 100644 --- a/lldb/tools/lldb-dap/Handler/RequestHandler.h +++ b/lldb/tools/lldb-dap/Handler/RequestHandler.h @@ -302,14 +302,18 @@ public: } }; -class ExceptionInfoRequestHandler : public LegacyRequestHandler { +class ExceptionInfoRequestHandler + : public RequestHandler< + protocol::ExceptionInfoArguments, + llvm::Expected<protocol::ExceptionInfoResponseBody>> { public: - using LegacyRequestHandler::LegacyRequestHandler; + using RequestHandler::RequestHandler; static llvm::StringLiteral GetCommand() { return "exceptionInfo"; } FeatureSet GetSupportedFeatures() const override { return {protocol::eAdapterFeatureExceptionInfoRequest}; } - void operator()(const llvm::json::Object &request) const override; + llvm::Expected<protocol::ExceptionInfoResponseBody> + Run(const protocol::ExceptionInfoArguments &args) const override; }; class InitializeRequestHandler diff --git a/lldb/tools/lldb-dap/Handler/RestartRequestHandler.cpp b/lldb/tools/lldb-dap/Handler/RestartRequestHandler.cpp index 45dd7dd..100173b 100644 --- a/lldb/tools/lldb-dap/Handler/RestartRequestHandler.cpp +++ b/lldb/tools/lldb-dap/Handler/RestartRequestHandler.cpp @@ -124,6 +124,8 @@ void RestartRequestHandler::operator()( return; } + SendProcessEvent(dap, Launch); + // This is normally done after receiving a "configuration done" request. // Because we're restarting, configuration has already happened so we can // continue the process right away. diff --git a/lldb/tools/lldb-dap/JSONUtils.cpp b/lldb/tools/lldb-dap/JSONUtils.cpp index 71e91f8..2780a5b 100644 --- a/lldb/tools/lldb-dap/JSONUtils.cpp +++ b/lldb/tools/lldb-dap/JSONUtils.cpp @@ -10,6 +10,7 @@ #include "DAP.h" #include "ExceptionBreakpoint.h" #include "LLDBUtils.h" +#include "Protocol/ProtocolBase.h" #include "ProtocolUtils.h" #include "lldb/API/SBAddress.h" #include "lldb/API/SBCompileUnit.h" @@ -284,7 +285,7 @@ void FillResponse(const llvm::json::Object &request, // Fill in all of the needed response fields to a "request" and set "success" // to true by default. 
response.try_emplace("type", "response"); - response.try_emplace("seq", (int64_t)0); + response.try_emplace("seq", protocol::kCalculateSeq); EmplaceSafeString(response, "command", GetString(request, "command").value_or("")); const uint64_t seq = GetInteger<uint64_t>(request, "seq").value_or(0); @@ -417,7 +418,7 @@ llvm::json::Value CreateScope(const llvm::StringRef name, // } llvm::json::Object CreateEventObject(const llvm::StringRef event_name) { llvm::json::Object event; - event.try_emplace("seq", 0); + event.try_emplace("seq", protocol::kCalculateSeq); event.try_emplace("type", "event"); EmplaceSafeString(event, "event", event_name); return event; diff --git a/lldb/tools/lldb-dap/Protocol/ProtocolBase.cpp b/lldb/tools/lldb-dap/Protocol/ProtocolBase.cpp index 9cd9028..7235921 100644 --- a/lldb/tools/lldb-dap/Protocol/ProtocolBase.cpp +++ b/lldb/tools/lldb-dap/Protocol/ProtocolBase.cpp @@ -58,6 +58,8 @@ bool fromJSON(const json::Value &Params, MessageType &M, json::Path P) { } json::Value toJSON(const Request &R) { + assert(R.seq != kCalculateSeq && "invalid seq"); + json::Object Result{ {"type", "request"}, {"seq", R.seq}, @@ -103,8 +105,10 @@ bool operator==(const Request &a, const Request &b) { } json::Value toJSON(const Response &R) { + assert(R.seq != kCalculateSeq && "invalid seq"); + json::Object Result{{"type", "response"}, - {"seq", 0}, + {"seq", R.seq}, {"command", R.command}, {"request_seq", R.request_seq}, {"success", R.success}}; @@ -132,8 +136,9 @@ json::Value toJSON(const Response &R) { return std::move(Result); } -bool fromJSON(json::Value const &Params, - std::variant<ResponseMessage, std::string> &M, json::Path P) { +static bool fromJSON(json::Value const &Params, + std::variant<ResponseMessage, std::string> &M, + json::Path P) { auto rawMessage = Params.getAsString(); if (!rawMessage) { P.report("expected a string"); @@ -157,8 +162,7 @@ bool fromJSON(json::Value const &Params, Response &R, json::Path P) { return false; MessageType type; - int64_t seq; - if (!O.map("type", type) || !O.map("seq", seq) || + if (!O.map("type", type) || !O.map("seq", R.seq) || !O.map("command", R.command) || !O.map("request_seq", R.request_seq)) return false; @@ -168,12 +172,7 @@ bool fromJSON(json::Value const &Params, Response &R, json::Path P) { } if (R.command.empty()) { - P.field("command").report("expected to not be ''"); - return false; - } - - if (R.request_seq == 0) { - P.field("request_seq").report("expected to not be '0'"); + P.field("command").report("expected to not be empty"); return false; } @@ -217,9 +216,11 @@ bool fromJSON(json::Value const &Params, ErrorMessage &EM, json::Path P) { } json::Value toJSON(const Event &E) { + assert(E.seq != kCalculateSeq && "invalid seq"); + json::Object Result{ {"type", "event"}, - {"seq", 0}, + {"seq", E.seq}, {"event", E.event}, }; @@ -235,8 +236,7 @@ bool fromJSON(json::Value const &Params, Event &E, json::Path P) { return false; MessageType type; - int64_t seq; - if (!O.map("type", type) || !O.map("seq", seq) || !O.map("event", E.event)) + if (!O.map("type", type) || !O.map("seq", E.seq) || !O.map("event", E.event)) return false; if (type != eMessageTypeEvent) { @@ -244,13 +244,8 @@ bool fromJSON(json::Value const &Params, Event &E, json::Path P) { return false; } - if (seq != 0) { - P.field("seq").report("expected to be '0'"); - return false; - } - if (E.event.empty()) { - P.field("event").report("expected to not be ''"); + P.field("event").report("expected to not be empty"); return false; } diff --git 
a/lldb/tools/lldb-dap/Protocol/ProtocolBase.h b/lldb/tools/lldb-dap/Protocol/ProtocolBase.h index 92e41b1..42c6c88 100644 --- a/lldb/tools/lldb-dap/Protocol/ProtocolBase.h +++ b/lldb/tools/lldb-dap/Protocol/ProtocolBase.h @@ -30,19 +30,15 @@ namespace lldb_dap::protocol { // MARK: Base Protocol +/// Message unique identifier type. using Id = int64_t; +/// A unique identifier that indicates the `seq` field should be calculated by +/// the current session. +static constexpr Id kCalculateSeq = INT64_MAX; + /// A client or debug adapter initiated request. struct Request { - /// Sequence number of the message (also known as message ID). The `seq` for - /// the first message sent by a client or debug adapter is 1, and for each - /// subsequent message is 1 greater than the previous message sent by that - /// actor. `seq` can be used to order requests, responses, and events, and to - /// associate requests with their corresponding responses. For protocol - /// messages of type `request` the sequence number can be used to cancel the - /// request. - Id seq; - /// The command to execute. std::string command; @@ -50,7 +46,16 @@ struct Request { /// /// Request handlers are expected to validate the arguments, which is handled /// by `RequestHandler`. - std::optional<llvm::json::Value> arguments; + std::optional<llvm::json::Value> arguments = std::nullopt; + + /// Sequence number of the message (also known as message ID). The `seq` for + /// the first message sent by a client or debug adapter is 1, and for each + /// subsequent message is 1 greater than the previous message sent by that + /// actor. `seq` can be used to order requests, responses, and events, and to + /// associate requests with their corresponding responses. For protocol + /// messages of type `request` the sequence number can be used to cancel the + /// request. + Id seq = kCalculateSeq; }; llvm::json::Value toJSON(const Request &); bool fromJSON(const llvm::json::Value &, Request &, llvm::json::Path); @@ -62,7 +67,16 @@ struct Event { std::string event; /// Event-specific information. - std::optional<llvm::json::Value> body; + std::optional<llvm::json::Value> body = std::nullopt; + + /// Sequence number of the message (also known as message ID). The `seq` for + /// the first message sent by a client or debug adapter is 1, and for each + /// subsequent message is 1 greater than the previous message sent by that + /// actor. `seq` can be used to order requests, responses, and events, and to + /// associate requests with their corresponding responses. For protocol + /// messages of type `request` the sequence number can be used to cancel the + /// request. + Id seq = kCalculateSeq; }; llvm::json::Value toJSON(const Event &); bool fromJSON(const llvm::json::Value &, Event &, llvm::json::Path); @@ -78,7 +92,7 @@ enum ResponseMessage : unsigned { /// Response for a request. struct Response { /// Sequence number of the corresponding request. - Id request_seq; + Id request_seq = 0; /// The command requested. std::string command; @@ -87,21 +101,31 @@ struct Response { /// attribute may contain the result of the request. If the value is false, /// the attribute `message` contains the error in short form and the `body` /// may contain additional information (see `ErrorMessage`). - bool success; + bool success = false; // FIXME: Migrate usage of fallback string to ErrorMessage /// Contains the raw error in short form if `success` is false. This raw error /// might be interpreted by the client and is not shown in the UI. 
Some /// predefined values exist. - std::optional<std::variant<ResponseMessage, std::string>> message; + std::optional<std::variant<ResponseMessage, std::string>> message = + std::nullopt; /// Contains request result if success is true and error details if success is /// false. /// /// Request handlers are expected to build an appropriate body, see /// `RequestHandler`. - std::optional<llvm::json::Value> body; + std::optional<llvm::json::Value> body = std::nullopt; + + /// Sequence number of the message (also known as message ID). The `seq` for + /// the first message sent by a client or debug adapter is 1, and for each + /// subsequent message is 1 greater than the previous message sent by that + /// actor. `seq` can be used to order requests, responses, and events, and to + /// associate requests with their corresponding responses. For protocol + /// messages of type `request` the sequence number can be used to cancel the + /// request. + Id seq = kCalculateSeq; }; bool fromJSON(const llvm::json::Value &, Response &, llvm::json::Path); llvm::json::Value toJSON(const Response &); diff --git a/lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp b/lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp index b939335..e207aad 100644 --- a/lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp +++ b/lldb/tools/lldb-dap/Protocol/ProtocolRequests.cpp @@ -625,4 +625,22 @@ llvm::json::Value toJSON(const ModuleSymbolsResponseBody &DGMSR) { return result; } +bool fromJSON(const json::Value &Params, ExceptionInfoArguments &Args, + json::Path Path) { + json::ObjectMapper O(Params, Path); + return O && O.map("threadId", Args.threadId); +} + +json::Value toJSON(const ExceptionInfoResponseBody &ERB) { + json::Object result{{"exceptionId", ERB.exceptionId}, + {"breakMode", ERB.breakMode}}; + + if (!ERB.description.empty()) + result.insert({"description", ERB.description.c_str()}); + if (ERB.details.has_value()) + result.insert({"details", *ERB.details}); + + return result; +} + } // namespace lldb_dap::protocol diff --git a/lldb/tools/lldb-dap/Protocol/ProtocolRequests.h b/lldb/tools/lldb-dap/Protocol/ProtocolRequests.h index a85a68b..53e551a 100644 --- a/lldb/tools/lldb-dap/Protocol/ProtocolRequests.h +++ b/lldb/tools/lldb-dap/Protocol/ProtocolRequests.h @@ -1039,6 +1039,28 @@ struct ModuleSymbolsResponseBody { }; llvm::json::Value toJSON(const ModuleSymbolsResponseBody &); +struct ExceptionInfoArguments { + /// Thread for which exception information should be retrieved. + lldb::tid_t threadId = LLDB_INVALID_THREAD_ID; +}; +bool fromJSON(const llvm::json::Value &, ExceptionInfoArguments &, + llvm::json::Path); + +struct ExceptionInfoResponseBody { + /// ID of the exception that was thrown. + std::string exceptionId; + + /// Descriptive text for the exception. + std::string description; + + /// Mode that caused the exception notification to be raised. + ExceptionBreakMode breakMode; + + /// Detailed information about the exception. 
+ std::optional<ExceptionDetails> details; +}; +llvm::json::Value toJSON(const ExceptionInfoResponseBody &); + } // namespace lldb_dap::protocol #endif diff --git a/lldb/tools/lldb-dap/Protocol/ProtocolTypes.cpp b/lldb/tools/lldb-dap/Protocol/ProtocolTypes.cpp index dc8edaa..9500701 100644 --- a/lldb/tools/lldb-dap/Protocol/ProtocolTypes.cpp +++ b/lldb/tools/lldb-dap/Protocol/ProtocolTypes.cpp @@ -1136,4 +1136,37 @@ bool fromJSON(const json::Value &Param, Variable &V, json::Path Path) { Path, /*required=*/false); } +json::Value toJSON(const ExceptionBreakMode Mode) { + switch (Mode) { + case eExceptionBreakModeNever: + return "never"; + case eExceptionBreakModeAlways: + return "always"; + case eExceptionBreakModeUnhandled: + return "unhandled"; + case eExceptionBreakModeUserUnhandled: + return "userUnhandled"; + } + llvm_unreachable("unhandled exception breakMode."); +} + +json::Value toJSON(const ExceptionDetails &ED) { + json::Object result; + + if (!ED.message.empty()) + result.insert({"message", ED.message}); + if (!ED.typeName.empty()) + result.insert({"typeName", ED.typeName}); + if (!ED.fullTypeName.empty()) + result.insert({"fullTypeName", ED.fullTypeName}); + if (!ED.evaluateName.empty()) + result.insert({"evaluateName", ED.evaluateName}); + if (!ED.stackTrace.empty()) + result.insert({"stackTrace", ED.stackTrace}); + if (!ED.innerException.empty()) + result.insert({"innerException", ED.innerException}); + + return result; +} + } // namespace lldb_dap::protocol diff --git a/lldb/tools/lldb-dap/Protocol/ProtocolTypes.h b/lldb/tools/lldb-dap/Protocol/ProtocolTypes.h index 7077df9..6d85c74 100644 --- a/lldb/tools/lldb-dap/Protocol/ProtocolTypes.h +++ b/lldb/tools/lldb-dap/Protocol/ProtocolTypes.h @@ -1007,6 +1007,36 @@ struct Variable { llvm::json::Value toJSON(const Variable &); bool fromJSON(const llvm::json::Value &, Variable &, llvm::json::Path); +enum ExceptionBreakMode : unsigned { + eExceptionBreakModeNever, + eExceptionBreakModeAlways, + eExceptionBreakModeUnhandled, + eExceptionBreakModeUserUnhandled, +}; +llvm::json::Value toJSON(ExceptionBreakMode); + +struct ExceptionDetails { + /// Message contained in the exception. + std::string message; + + /// Short type name of the exception object. + std::string typeName; + + /// Fully-qualified type name of the exception object. + std::string fullTypeName; + + /// An expression that can be evaluated in the current scope to obtain the + /// exception object. + std::string evaluateName; + + /// Stack trace at the time the exception was thrown. + std::string stackTrace; + + /// Details of the exception contained by this exception, if any. 
+ std::vector<ExceptionDetails> innerException; +}; +llvm::json::Value toJSON(const ExceptionDetails &); + } // namespace lldb_dap::protocol #endif diff --git a/lldb/unittests/DAP/CMakeLists.txt b/lldb/unittests/DAP/CMakeLists.txt index a08414c..434f528 100644 --- a/lldb/unittests/DAP/CMakeLists.txt +++ b/lldb/unittests/DAP/CMakeLists.txt @@ -7,6 +7,7 @@ add_lldb_unittest(DAPTests Handler/ContinueTest.cpp JSONUtilsTest.cpp LLDBUtilsTest.cpp + ProtocolRequestsTest.cpp ProtocolTypesTest.cpp ProtocolUtilsTest.cpp TestBase.cpp diff --git a/lldb/unittests/DAP/ProtocolRequestsTest.cpp b/lldb/unittests/DAP/ProtocolRequestsTest.cpp new file mode 100644 index 0000000..498195d --- /dev/null +++ b/lldb/unittests/DAP/ProtocolRequestsTest.cpp @@ -0,0 +1,69 @@ +//===-- ProtocolRequestsTest.cpp ------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "Protocol/ProtocolRequests.h" +#include "Protocol/ProtocolTypes.h" +#include "TestingSupport/TestUtilities.h" +#include "llvm/Testing/Support/Error.h" +#include <gtest/gtest.h> + +using namespace llvm; +using namespace lldb_dap::protocol; +using lldb_private::PrettyPrint; +using llvm::json::parse; + +TEST(ProtocolRequestsTest, ExceptionInfoArguments) { + llvm::Expected<ExceptionInfoArguments> expected = + parse<ExceptionInfoArguments>(R"({ + "threadId": 3434 + })"); + ASSERT_THAT_EXPECTED(expected, llvm::Succeeded()); + EXPECT_EQ(expected->threadId, 3434U); + + // Check required keys; + EXPECT_THAT_EXPECTED(parse<ExceptionInfoArguments>(R"({})"), + FailedWithMessage("missing value at (root).threadId")); + + EXPECT_THAT_EXPECTED(parse<ExceptionInfoArguments>(R"({"id": 10})"), + FailedWithMessage("missing value at (root).threadId")); +} + +TEST(ProtocolRequestsTest, ExceptionInfoResponseBody) { + ExceptionInfoResponseBody body; + body.exceptionId = "signal"; + body.breakMode = eExceptionBreakModeAlways; + + // Check required keys. + Expected<json::Value> expected = parse( + R"({ + "exceptionId": "signal", + "breakMode": "always" + })"); + + ASSERT_THAT_EXPECTED(expected, llvm::Succeeded()); + EXPECT_EQ(PrettyPrint(*expected), PrettyPrint(body)); + + // Check optional keys. 
+ body.description = "SIGNAL SIGWINCH"; + body.breakMode = eExceptionBreakModeNever; + body.details = ExceptionDetails{}; + body.details->message = "some message"; + + Expected<json::Value> expected_opt = parse( + R"({ + "exceptionId": "signal", + "description": "SIGNAL SIGWINCH", + "breakMode": "never", + "details": { + "message": "some message" + } + })"); + + ASSERT_THAT_EXPECTED(expected_opt, llvm::Succeeded()); + EXPECT_EQ(PrettyPrint(*expected_opt), PrettyPrint(body)); +} diff --git a/lldb/unittests/DAP/ProtocolTypesTest.cpp b/lldb/unittests/DAP/ProtocolTypesTest.cpp index 8170abd..6a4620a 100644 --- a/lldb/unittests/DAP/ProtocolTypesTest.cpp +++ b/lldb/unittests/DAP/ProtocolTypesTest.cpp @@ -1129,3 +1129,50 @@ TEST(ProtocolTypesTest, DataBreakpointInfoArguments) { EXPECT_THAT_EXPECTED(parse<DataBreakpointInfoArguments>(R"({"name":"data"})"), llvm::Succeeded()); } + +TEST(ProtocolTypesTest, ExceptionBreakMode) { + const std::vector<std::pair<ExceptionBreakMode, llvm::StringRef>> test_cases = + {{ExceptionBreakMode::eExceptionBreakModeAlways, "always"}, + {ExceptionBreakMode::eExceptionBreakModeNever, "never"}, + {ExceptionBreakMode::eExceptionBreakModeUnhandled, "unhandled"}, + {ExceptionBreakMode::eExceptionBreakModeUserUnhandled, "userUnhandled"}}; + + for (const auto [value, expected] : test_cases) { + json::Value const serialized = toJSON(value); + ASSERT_EQ(serialized.kind(), llvm::json::Value::Kind::String); + EXPECT_EQ(serialized.getAsString(), expected); + } +} + +TEST(ProtocolTypesTest, ExceptionDetails) { + ExceptionDetails details; + + // Check required keys. + Expected<json::Value> expected = parse(R"({})"); + ASSERT_THAT_EXPECTED(expected, llvm::Succeeded()); + EXPECT_EQ(pp(*expected), pp(details)); + + // Check optional keys. + details.message = "SIGABRT exception"; + details.typeName = "signal"; + details.fullTypeName = "SIGABRT"; + details.evaluateName = "process handle SIGABRT"; + details.stackTrace = "some stacktrace"; + ExceptionDetails inner_details; + inner_details.message = "inner message"; + details.innerException = {std::move(inner_details)}; + + Expected<json::Value> expected_opt = parse(R"({ + "message": "SIGABRT exception", + "typeName": "signal", + "fullTypeName": "SIGABRT", + "evaluateName": "process handle SIGABRT", + "stackTrace": "some stacktrace", + "innerException": [{ + "message": "inner message" + }] + })"); + + ASSERT_THAT_EXPECTED(expected_opt, llvm::Succeeded()); + EXPECT_EQ(pp(*expected_opt), pp(details)); +} diff --git a/lldb/unittests/Target/MemoryTest.cpp b/lldb/unittests/Target/MemoryTest.cpp index f7b4e97..e444f68 100644 --- a/lldb/unittests/Target/MemoryTest.cpp +++ b/lldb/unittests/Target/MemoryTest.cpp @@ -245,7 +245,7 @@ public: if (read_more_than_requested) size *= 2; uint8_t *buffer = static_cast<uint8_t *>(buf); - for (size_t addr = vm_addr; addr < vm_addr + size; addr++) + for (lldb::addr_t addr = vm_addr; addr < vm_addr + size; addr++) buffer[addr - vm_addr] = static_cast<uint8_t>(addr); // LSB of addr. 
return size; } diff --git a/lldb/unittests/TestingSupport/TestUtilities.cpp b/lldb/unittests/TestingSupport/TestUtilities.cpp index b53822e..d164c22 100644 --- a/lldb/unittests/TestingSupport/TestUtilities.cpp +++ b/lldb/unittests/TestingSupport/TestUtilities.cpp @@ -20,6 +20,11 @@ using namespace lldb_private; extern const char *TestMainArgv0; std::once_flag TestUtilities::g_debugger_initialize_flag; + +std::string lldb_private::PrettyPrint(const llvm::json::Value &value) { + return llvm::formatv("{0:2}", value).str(); +} + std::string lldb_private::GetInputFilePath(const llvm::Twine &name) { llvm::SmallString<128> result = llvm::sys::path::parent_path(TestMainArgv0); llvm::sys::fs::make_absolute(result); diff --git a/lldb/unittests/TestingSupport/TestUtilities.h b/lldb/unittests/TestingSupport/TestUtilities.h index cc93a68..f05d176 100644 --- a/lldb/unittests/TestingSupport/TestUtilities.h +++ b/lldb/unittests/TestingSupport/TestUtilities.h @@ -30,6 +30,10 @@ } namespace lldb_private { + +/// Returns a pretty printed json string of a `llvm::json::Value`. +std::string PrettyPrint(const llvm::json::Value &E); + std::string GetInputFilePath(const llvm::Twine &name); class TestUtilities { diff --git a/llvm/docs/CommandGuide/llc.rst b/llvm/docs/CommandGuide/llc.rst index 900649f..cc670f6 100644 --- a/llvm/docs/CommandGuide/llc.rst +++ b/llvm/docs/CommandGuide/llc.rst @@ -125,13 +125,6 @@ End-user Options Enable setting the FP exceptions build attribute not to use exceptions. -.. option:: --enable-unsafe-fp-math - - Enable optimizations that make unsafe assumptions about IEEE math (e.g. that - addition is associative) or may not work for all input ranges. These - optimizations allow the code generator to make use of some instructions which - would otherwise not be usable (such as ``fsin`` on X86). - .. option:: --stats Print statistics recorded by code-generation passes. diff --git a/llvm/docs/CommandGuide/lli.rst b/llvm/docs/CommandGuide/lli.rst index 94c0013..8afe10d 100644 --- a/llvm/docs/CommandGuide/lli.rst +++ b/llvm/docs/CommandGuide/lli.rst @@ -107,11 +107,6 @@ FLOATING POINT OPTIONS Enable optimizations that assume no NAN values. -.. option:: -enable-unsafe-fp-math - - Causes :program:`lli` to enable optimizations that may decrease floating point - precision. - .. option:: -soft-float Causes :program:`lli` to generate software floating point library calls instead of diff --git a/llvm/docs/DeveloperPolicy.rst b/llvm/docs/DeveloperPolicy.rst index b94e79e..45f2df2 100644 --- a/llvm/docs/DeveloperPolicy.rst +++ b/llvm/docs/DeveloperPolicy.rst @@ -413,6 +413,10 @@ Below are some guidelines about the format of the message itself: message self-explanatory. Note that such non-public links should not be included in the submitted code. +* Avoid 'tagging' someone's username in your commits and PR descriptions + (e.g., `@<someUser>`), doing so results in that account receiving a notification + every time the commit is cherry-picked and/or pushed to a fork. + LLVM uses a squash workflow for pull requests, so as the pull request evolves during review, it's important to update the pull request description over the course of a review. GitHub uses the initial commit message to create the pull diff --git a/llvm/docs/GlobalISel/GenericOpcode.rst b/llvm/docs/GlobalISel/GenericOpcode.rst index b055327..661a115 100644 --- a/llvm/docs/GlobalISel/GenericOpcode.rst +++ b/llvm/docs/GlobalISel/GenericOpcode.rst @@ -504,7 +504,7 @@ undefined. 
G_ABDS, G_ABDU ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Compute the absolute difference (signed and unsigned), e.g. abs(x-y). +Compute the absolute difference (signed and unsigned), e.g. trunc(abs(ext(x)-ext(y))). .. code-block:: none diff --git a/llvm/docs/QualGroup.rst b/llvm/docs/QualGroup.rst index 5c05e4e..0e73ec5 100644 --- a/llvm/docs/QualGroup.rst +++ b/llvm/docs/QualGroup.rst @@ -181,6 +181,46 @@ Membership Review To ensure the group remains active and focused, member participation will be reviewed every six months. Inactive members may be removed following this review. +Decision Taking +--------------- + +The LLVM Qualification Working Group aims to make decisions transparently, collaboratively, and without unnecessary formality. The goal is to maintain efficiency while encouraging broad participation and mutual understanding. + +This section describes the lightweight process used to handle proposals and decisions within the group. It may be revised as the group evolves and gains experience. + +Principles +^^^^^^^^^^ + +* **Consensus first:** The preferred mode of decision-making is consensus through open discussion (primarily on Discord or during sync-up meetings). +* **Inclusiveness and respect:** All viewpoints are encouraged, and members are expected to contribute constructively toward reaching a shared understanding. +* **Transparency:** Discussions leading to a decision should be visible to the group and, whenever appropriate, summarized in public channels (e.g., Discourse meeting notes, Discord channel, documentation updates). + +Consensus and Time Limits +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Discussions remain open until a clear consensus emerges, meaning no sustained objections have been raised after reasonable discussion. + +To prevent open-ended debates, if no new viewpoints are expressed after an agreed period (e.g., 2 weeks), the moderator (typically the person who started the discussion thread) may take one of the following actions: + +* **Summarize the apparent consensus** and close the discussion, or +* **Postpone the topic** to the next sync-up meeting if the outcome remains unclear, or +* **Call for a short vote** to confirm the group’s position. + +Voting Procedure +^^^^^^^^^^^^^^^^ + +When consensus cannot be reached or when a clear yes/no decision is needed: + +* The moderator may call for a **simple vote** using emoji reactions on Discord or a similar visible method. +* A decision passes if it receives a **majority (>50%)** of votes among **participants who voted.** Non-votes are **not counted** in the total. +* To ensure decisions reflect the collective position of the group, **at least three-quarters of the total core members** must participate in the vote for it to be considered valid. +* If results are evenly split **(50/50)**, or if participation falls below this threshold, the topic may be postponed to the next sync-up meeting for further discussion. + +Documentation +^^^^^^^^^^^^^ + +Final decisions should be briefly documented (e.g., in meeting minutes, the corresponding GitHub issue, or Discord discussion thread). Once stable, the resulting policy or outcome may be reflected in this documentation for reference. 
+ Current Topics & Backlog ======================== @@ -205,10 +245,11 @@ Slides used to support discussions during sync-up meetings are stored in LLVM's Available slides: +* (add future entries here) +* `October 2025 <https://docs.google.com/presentation/d/1ND2SkjgcHvcEbQmMd8ExL-PpRXouP49T-wfy3xf2yRQ/edit?usp=sharing>`_ * `September 2025 <https://docs.google.com/presentation/d/1SZAE-QHfJED6CxJCCtBkPDxcw7XU9ORX54TJyXe1ppc/edit?usp=sharing>`_ * `August 2025 <https://docs.google.com/presentation/d/1K8GWoRm8ZAeyyGvTeV5f-sMOhMr7WHiEk6_Nm5Fk10o/edit?usp=sharing>`_ * `July 2025 <https://docs.google.com/presentation/d/1ktURe9qz5ggbdOQYK-2ISpiC18B-Y_35WvGyAnnxEpw/edit?usp=sharing>`_ -* (add future entries here) AI Transcription Policy ======================= diff --git a/llvm/docs/SourceLevelDebugging.rst b/llvm/docs/SourceLevelDebugging.rst index f057b2d..12b5e3e 100644 --- a/llvm/docs/SourceLevelDebugging.rst +++ b/llvm/docs/SourceLevelDebugging.rst @@ -674,7 +674,7 @@ Compiled to LLVM, this function would be represented like this: ret void, !dbg !24 } - attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } + attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" } attributes #1 = { nounwind readnone } !llvm.dbg.cu = !{!0} diff --git a/llvm/include/llvm/CodeGen/CommandFlags.h b/llvm/include/llvm/CodeGen/CommandFlags.h index 39c5a8d..af66f2d 100644 --- a/llvm/include/llvm/CodeGen/CommandFlags.h +++ b/llvm/include/llvm/CodeGen/CommandFlags.h @@ -58,8 +58,6 @@ LLVM_ABI CodeGenFileType getFileType(); LLVM_ABI FramePointerKind getFramePointerUsage(); -LLVM_ABI bool getEnableUnsafeFPMath(); - LLVM_ABI bool getEnableNoInfsFPMath(); LLVM_ABI bool getEnableNoNaNsFPMath(); diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h index b7ccfbb..8db99ba 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h @@ -210,8 +210,8 @@ struct SpecificConstantMatch { }; /// Matches a constant equal to \p RequestedValue. -inline SpecificConstantMatch m_SpecificICst(APInt RequestedValue) { - return SpecificConstantMatch(std::move(RequestedValue)); +inline SpecificConstantMatch m_SpecificICst(const APInt &RequestedValue) { + return SpecificConstantMatch(RequestedValue); } inline SpecificConstantMatch m_SpecificICst(int64_t RequestedValue) { @@ -221,7 +221,7 @@ inline SpecificConstantMatch m_SpecificICst(int64_t RequestedValue) { /// Matcher for a specific constant splat. struct SpecificConstantSplatMatch { APInt RequestedVal; - SpecificConstantSplatMatch(const APInt RequestedVal) + SpecificConstantSplatMatch(const APInt &RequestedVal) : RequestedVal(RequestedVal) {} bool match(const MachineRegisterInfo &MRI, Register Reg) { return isBuildVectorConstantSplat(Reg, MRI, RequestedVal, @@ -230,8 +230,9 @@ struct SpecificConstantSplatMatch { }; /// Matches a constant splat of \p RequestedValue. 
-inline SpecificConstantSplatMatch m_SpecificICstSplat(APInt RequestedValue) { - return SpecificConstantSplatMatch(std::move(RequestedValue)); +inline SpecificConstantSplatMatch +m_SpecificICstSplat(const APInt &RequestedValue) { + return SpecificConstantSplatMatch(RequestedValue); } inline SpecificConstantSplatMatch m_SpecificICstSplat(int64_t RequestedValue) { @@ -242,7 +243,7 @@ inline SpecificConstantSplatMatch m_SpecificICstSplat(int64_t RequestedValue) { /// Matcher for a specific constant or constant splat. struct SpecificConstantOrSplatMatch { APInt RequestedVal; - SpecificConstantOrSplatMatch(const APInt RequestedVal) + SpecificConstantOrSplatMatch(const APInt &RequestedVal) : RequestedVal(RequestedVal) {} bool match(const MachineRegisterInfo &MRI, Register Reg) { APInt MatchedVal; @@ -263,8 +264,8 @@ struct SpecificConstantOrSplatMatch { /// Matches a \p RequestedValue constant or a constant splat of \p /// RequestedValue. inline SpecificConstantOrSplatMatch -m_SpecificICstOrSplat(APInt RequestedValue) { - return SpecificConstantOrSplatMatch(std::move(RequestedValue)); +m_SpecificICstOrSplat(const APInt &RequestedValue) { + return SpecificConstantOrSplatMatch(RequestedValue); } inline SpecificConstantOrSplatMatch diff --git a/llvm/include/llvm/CodeGen/SDPatternMatch.h b/llvm/include/llvm/CodeGen/SDPatternMatch.h index 201dc68..0dcf400 100644 --- a/llvm/include/llvm/CodeGen/SDPatternMatch.h +++ b/llvm/include/llvm/CodeGen/SDPatternMatch.h @@ -559,6 +559,11 @@ m_VSelect(const T0_P &Cond, const T1_P &T, const T2_P &F) { } template <typename T0_P, typename T1_P, typename T2_P> +inline auto m_SelectLike(const T0_P &Cond, const T1_P &T, const T2_P &F) { + return m_AnyOf(m_Select(Cond, T, F), m_VSelect(Cond, T, F)); +} + +template <typename T0_P, typename T1_P, typename T2_P> inline Result_match<0, TernaryOpc_match<T0_P, T1_P, T2_P>> m_Load(const T0_P &Ch, const T1_P &Ptr, const T2_P &Offset) { return m_Result<0>( diff --git a/llvm/include/llvm/IR/Attributes.td b/llvm/include/llvm/IR/Attributes.td index 8e7d9dc..8ce2b1b 100644 --- a/llvm/include/llvm/IR/Attributes.td +++ b/llvm/include/llvm/IR/Attributes.td @@ -410,7 +410,6 @@ def LessPreciseFPMAD : StrBoolAttr<"less-precise-fpmad">; def NoInfsFPMath : StrBoolAttr<"no-infs-fp-math">; def NoNansFPMath : StrBoolAttr<"no-nans-fp-math">; def NoSignedZerosFPMath : StrBoolAttr<"no-signed-zeros-fp-math">; -def UnsafeFPMath : StrBoolAttr<"unsafe-fp-math">; def NoJumpTables : StrBoolAttr<"no-jump-tables">; def NoInlineLineTables : StrBoolAttr<"no-inline-line-tables">; def ProfileSampleAccurate : StrBoolAttr<"profile-sample-accurate">; @@ -474,7 +473,6 @@ def : MergeRule<"setAND<LessPreciseFPMADAttr>">; def : MergeRule<"setAND<NoInfsFPMathAttr>">; def : MergeRule<"setAND<NoNansFPMathAttr>">; def : MergeRule<"setAND<NoSignedZerosFPMathAttr>">; -def : MergeRule<"setAND<UnsafeFPMathAttr>">; def : MergeRule<"setOR<NoImplicitFloatAttr>">; def : MergeRule<"setOR<NoJumpTablesAttr>">; def : MergeRule<"setOR<ProfileSampleAccurateAttr>">; diff --git a/llvm/include/llvm/IR/AutoUpgrade.h b/llvm/include/llvm/IR/AutoUpgrade.h index 31096e8..540d60a 100644 --- a/llvm/include/llvm/IR/AutoUpgrade.h +++ b/llvm/include/llvm/IR/AutoUpgrade.h @@ -96,6 +96,16 @@ namespace llvm { /// info. Return true if module is modified. LLVM_ABI bool UpgradeDebugInfo(Module &M); + /// Copies module attributes to the functions in the module. + /// Currently only affects ARM, Thumb and AArch64 targets. 
+ /// Supported attributes: + /// - branch-target-enforcement + /// - branch-protection-pauth-lr + /// - guarded-control-stack + /// - sign-return-address + /// - sign-return-address-with-bkey + void copyModuleAttrToFunctions(Module &M); + /// Check whether a string looks like an old loop attachment tag. inline bool mayBeOldLoopAttachmentTag(StringRef Name) { return Name.starts_with("llvm.vectorizer."); diff --git a/llvm/include/llvm/Support/AllocToken.h b/llvm/include/llvm/Support/AllocToken.h new file mode 100644 index 0000000..e40d816 --- /dev/null +++ b/llvm/include/llvm/Support/AllocToken.h @@ -0,0 +1,68 @@ +//===- llvm/Support/AllocToken.h - Allocation Token Calculation -----*- C++ -*// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Definition of AllocToken modes and shared calculation of stateless token IDs. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_SUPPORT_ALLOCTOKEN_H +#define LLVM_SUPPORT_ALLOCTOKEN_H + +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/StringRef.h" +#include <cstdint> +#include <optional> + +namespace llvm { + +/// Modes for generating allocation token IDs. +enum class AllocTokenMode { + /// Incrementally increasing token ID. + Increment, + + /// Simple mode that returns a statically-assigned random token ID. + Random, + + /// Token ID based on allocated type hash. + TypeHash, + + /// Token ID based on allocated type hash, where the top half ID-space is + /// reserved for types that contain pointers and the bottom half for types + /// that do not contain pointers. + TypeHashPointerSplit, +}; + +/// The default allocation token mode. +inline constexpr AllocTokenMode DefaultAllocTokenMode = + AllocTokenMode::TypeHashPointerSplit; + +/// Returns the AllocTokenMode from its canonical string name; if an invalid +/// name was provided returns nullopt. +LLVM_ABI std::optional<AllocTokenMode> +getAllocTokenModeFromString(StringRef Name); + +/// Metadata about an allocation used to generate a token ID. +struct AllocTokenMetadata { + SmallString<64> TypeName; + bool ContainsPointer; +}; + +/// Calculates stable allocation token ID. Returns std::nullopt for stateful +/// modes that are only available in the AllocToken pass. +/// +/// \param Mode The token generation mode. +/// \param Metadata The metadata about the allocation. +/// \param MaxTokens The maximum number of tokens (must not be 0) +/// \return The calculated allocation token ID, or std::nullopt. 
+LLVM_ABI std::optional<uint64_t> +getAllocToken(AllocTokenMode Mode, const AllocTokenMetadata &Metadata, + uint64_t MaxTokens); + +} // end namespace llvm + +#endif // LLVM_SUPPORT_ALLOCTOKEN_H diff --git a/llvm/include/llvm/Target/TargetOptions.h b/llvm/include/llvm/Target/TargetOptions.h index 2c2122a..bfd2817 100644 --- a/llvm/include/llvm/Target/TargetOptions.h +++ b/llvm/include/llvm/Target/TargetOptions.h @@ -118,9 +118,8 @@ enum CodeObjectVersionKind { class TargetOptions { public: TargetOptions() - : UnsafeFPMath(false), NoInfsFPMath(false), NoNaNsFPMath(false), - NoTrappingFPMath(true), NoSignedZerosFPMath(false), - EnableAIXExtendedAltivecABI(false), + : NoInfsFPMath(false), NoNaNsFPMath(false), NoTrappingFPMath(true), + NoSignedZerosFPMath(false), EnableAIXExtendedAltivecABI(false), HonorSignDependentRoundingFPMathOption(false), NoZerosInBSS(false), GuaranteedTailCallOpt(false), StackSymbolOrdering(true), EnableFastISel(false), EnableGlobalISel(false), UseInitArray(false), @@ -156,13 +155,6 @@ public: /// MCAsmInfo::BinutilsVersion. std::pair<int, int> BinutilsVersion{0, 0}; - /// UnsafeFPMath - This flag is enabled when the - /// -enable-unsafe-fp-math flag is specified on the command line. When - /// this flag is off (the default), the code generator is not allowed to - /// produce results that are "less precise" than IEEE allows. This includes - /// use of X86 instructions like FSIN and FCOS instead of libcalls. - unsigned UnsafeFPMath : 1; - /// NoInfsFPMath - This flag is enabled when the /// -enable-no-infs-fp-math flag is specified on the command line. When /// this flag is off (the default), the code generator is not allowed to diff --git a/llvm/include/llvm/Transforms/Instrumentation/AllocToken.h b/llvm/include/llvm/Transforms/Instrumentation/AllocToken.h index b1391cb0..077703c 100644 --- a/llvm/include/llvm/Transforms/Instrumentation/AllocToken.h +++ b/llvm/include/llvm/Transforms/Instrumentation/AllocToken.h @@ -16,6 +16,7 @@ #include "llvm/IR/Analysis.h" #include "llvm/IR/PassManager.h" +#include "llvm/Support/AllocToken.h" #include <optional> namespace llvm { @@ -23,6 +24,7 @@ namespace llvm { class Module; struct AllocTokenOptions { + AllocTokenMode Mode = DefaultAllocTokenMode; std::optional<uint64_t> MaxTokens; bool FastABI = false; bool Extended = false; diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp index b573023..8da51d0 100644 --- a/llvm/lib/Analysis/InstructionSimplify.cpp +++ b/llvm/lib/Analysis/InstructionSimplify.cpp @@ -4866,89 +4866,6 @@ static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F, return nullptr; } -/// Look for the following pattern and simplify %to_fold to %identicalPhi. -/// Here %phi, %to_fold and %phi.next perform the same functionality as -/// %identicalPhi and hence the select instruction %to_fold can be folded -/// into %identicalPhi. -/// -/// BB1: -/// %identicalPhi = phi [ X, %BB0 ], [ %identicalPhi.next, %BB1 ] -/// %phi = phi [ X, %BB0 ], [ %phi.next, %BB1 ] -/// ... -/// %identicalPhi.next = select %cmp, %val, %identicalPhi -/// (or select %cmp, %identicalPhi, %val) -/// %to_fold = select %cmp2, %identicalPhi, %phi -/// %phi.next = select %cmp, %val, %to_fold -/// (or select %cmp, %to_fold, %val) -/// -/// Prove that %phi and %identicalPhi are the same by induction: -/// -/// Base case: Both %phi and %identicalPhi are equal on entry to the loop. -/// Inductive case: -/// Suppose %phi and %identicalPhi are equal at iteration i. 
-/// We look at their values at iteration i+1 which are %phi.next and -/// %identicalPhi.next. They would have become different only when %cmp is -/// false and the corresponding values %to_fold and %identicalPhi differ -/// (similar reason for the other "or" case in the bracket). -/// -/// The only condition when %to_fold and %identicalPh could differ is when %cmp2 -/// is false and %to_fold is %phi, which contradicts our inductive hypothesis -/// that %phi and %identicalPhi are equal. Thus %phi and %identicalPhi are -/// always equal at iteration i+1. -bool isSimplifierIdenticalPHI(PHINode &PN, PHINode &IdenticalPN) { - if (PN.getParent() != IdenticalPN.getParent()) - return false; - if (PN.getNumIncomingValues() != 2) - return false; - - // Check that only the backedge incoming value is different. - unsigned DiffVals = 0; - BasicBlock *DiffValBB = nullptr; - for (unsigned i = 0; i < 2; i++) { - BasicBlock *PredBB = PN.getIncomingBlock(i); - if (PN.getIncomingValueForBlock(PredBB) != - IdenticalPN.getIncomingValueForBlock(PredBB)) { - DiffVals++; - DiffValBB = PredBB; - } - } - if (DiffVals != 1) - return false; - // Now check that the backedge incoming values are two select - // instructions with the same condition. Either their true - // values are the same, or their false values are the same. - auto *SI = dyn_cast<SelectInst>(PN.getIncomingValueForBlock(DiffValBB)); - auto *IdenticalSI = - dyn_cast<SelectInst>(IdenticalPN.getIncomingValueForBlock(DiffValBB)); - if (!SI || !IdenticalSI) - return false; - if (SI->getCondition() != IdenticalSI->getCondition()) - return false; - - SelectInst *SIOtherVal = nullptr; - Value *IdenticalSIOtherVal = nullptr; - if (SI->getTrueValue() == IdenticalSI->getTrueValue()) { - SIOtherVal = dyn_cast<SelectInst>(SI->getFalseValue()); - IdenticalSIOtherVal = IdenticalSI->getFalseValue(); - } else if (SI->getFalseValue() == IdenticalSI->getFalseValue()) { - SIOtherVal = dyn_cast<SelectInst>(SI->getTrueValue()); - IdenticalSIOtherVal = IdenticalSI->getTrueValue(); - } else { - return false; - } - - // Now check that the other values in select, i.e., %to_fold and - // %identicalPhi, are essentially the same value. - if (!SIOtherVal || IdenticalSIOtherVal != &IdenticalPN) - return false; - if (!(SIOtherVal->getTrueValue() == &IdenticalPN && - SIOtherVal->getFalseValue() == &PN) && - !(SIOtherVal->getTrueValue() == &PN && - SIOtherVal->getFalseValue() == &IdenticalPN)) - return false; - return true; -} - /// Given operands for a SelectInst, see if we can fold the result. /// If not, this returns null. static Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, @@ -5124,14 +5041,7 @@ static Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, std::optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL); if (Imp) return *Imp ? TrueVal : FalseVal; - // Look for same PHIs in the true and false values. 
- if (auto *TruePHI = dyn_cast<PHINode>(TrueVal)) - if (auto *FalsePHI = dyn_cast<PHINode>(FalseVal)) { - if (isSimplifierIdenticalPHI(*TruePHI, *FalsePHI)) - return FalseVal; - if (isSimplifierIdenticalPHI(*FalsePHI, *TruePHI)) - return TrueVal; - } + return nullptr; } diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp index cf63285..f71a534 100644 --- a/llvm/lib/AsmParser/LLParser.cpp +++ b/llvm/lib/AsmParser/LLParser.cpp @@ -451,6 +451,7 @@ bool LLParser::validateEndOfModule(bool UpgradeDebugInfo) { UpgradeModuleFlags(*M); UpgradeNVVMAnnotations(*M); UpgradeSectionAttributes(*M); + copyModuleAttrToFunctions(*M); if (!Slots) return false; diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp index aaee1f0..cf7efbfa 100644 --- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -7143,6 +7143,8 @@ Error BitcodeReader::materializeModule() { UpgradeARCRuntime(*TheModule); + copyModuleAttrToFunctions(*TheModule); + return Error::success(); } diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index e2af0c5..fefde64f 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -1438,6 +1438,7 @@ getBBAddrMapFeature(const MachineFunction &MF, int NumMBBSectionRanges, BBFreqEnabled, BrProbEnabled, MF.hasBBSections() && NumMBBSectionRanges > 1, + // Use static_cast to avoid breakage of tests on windows. static_cast<bool>(BBAddrMapSkipEmitBBEntries), HasCalls, false}; diff --git a/llvm/lib/CodeGen/CommandFlags.cpp b/llvm/lib/CodeGen/CommandFlags.cpp index 0522698..c1365f4 100644 --- a/llvm/lib/CodeGen/CommandFlags.cpp +++ b/llvm/lib/CodeGen/CommandFlags.cpp @@ -64,7 +64,6 @@ CGOPT_EXP(uint64_t, LargeDataThreshold) CGOPT(ExceptionHandling, ExceptionModel) CGOPT_EXP(CodeGenFileType, FileType) CGOPT(FramePointerKind, FramePointerUsage) -CGOPT(bool, EnableUnsafeFPMath) CGOPT(bool, EnableNoInfsFPMath) CGOPT(bool, EnableNoNaNsFPMath) CGOPT(bool, EnableNoSignedZerosFPMath) @@ -219,12 +218,6 @@ codegen::RegisterCodeGenFlags::RegisterCodeGenFlags() { "Enable frame pointer elimination"))); CGBINDOPT(FramePointerUsage); - static cl::opt<bool> EnableUnsafeFPMath( - "enable-unsafe-fp-math", - cl::desc("Enable optimizations that may decrease FP precision"), - cl::init(false)); - CGBINDOPT(EnableUnsafeFPMath); - static cl::opt<bool> EnableNoInfsFPMath( "enable-no-infs-fp-math", cl::desc("Enable FP math optimizations that assume no +-Infs"), @@ -552,7 +545,6 @@ TargetOptions codegen::InitTargetOptionsFromCodeGenFlags(const Triple &TheTriple) { TargetOptions Options; Options.AllowFPOpFusion = getFuseFPOps(); - Options.UnsafeFPMath = getEnableUnsafeFPMath(); Options.NoInfsFPMath = getEnableNoInfsFPMath(); Options.NoNaNsFPMath = getEnableNoNaNsFPMath(); Options.NoSignedZerosFPMath = getEnableNoSignedZerosFPMath(); @@ -706,7 +698,6 @@ void codegen::setFunctionAttributes(StringRef CPU, StringRef Features, if (getStackRealign()) NewAttrs.addAttribute("stackrealign"); - HANDLE_BOOL_ATTR(EnableUnsafeFPMathView, "unsafe-fp-math"); HANDLE_BOOL_ATTR(EnableNoInfsFPMathView, "no-infs-fp-math"); HANDLE_BOOL_ATTR(EnableNoNaNsFPMathView, "no-nans-fp-math"); HANDLE_BOOL_ATTR(EnableNoSignedZerosFPMathView, "no-signed-zeros-fp-math"); diff --git a/llvm/lib/CodeGen/MachineLICM.cpp b/llvm/lib/CodeGen/MachineLICM.cpp index 7acddff..729e73c 100644 --- a/llvm/lib/CodeGen/MachineLICM.cpp +++ b/llvm/lib/CodeGen/MachineLICM.cpp @@ 
-932,12 +932,11 @@ void MachineLICMImpl::InitRegPressure(MachineBasicBlock *BB) { void MachineLICMImpl::UpdateRegPressure(const MachineInstr *MI, bool ConsiderUnseenAsDef) { auto Cost = calcRegisterCost(MI, /*ConsiderSeen=*/true, ConsiderUnseenAsDef); - for (const auto &RPIdAndCost : Cost) { - unsigned Class = RPIdAndCost.first; - if (static_cast<int>(RegPressure[Class]) < -RPIdAndCost.second) + for (const auto &[Class, Weight] : Cost) { + if (static_cast<int>(RegPressure[Class]) < -Weight) RegPressure[Class] = 0; else - RegPressure[Class] += RPIdAndCost.second; + RegPressure[Class] += Weight; } } @@ -1215,11 +1214,10 @@ bool MachineLICMImpl::IsCheapInstruction(MachineInstr &MI) const { /// given cost matrix can cause high register pressure. bool MachineLICMImpl::CanCauseHighRegPressure( const SmallDenseMap<unsigned, int> &Cost, bool CheapInstr) { - for (const auto &RPIdAndCost : Cost) { - if (RPIdAndCost.second <= 0) + for (const auto &[Class, Weight] : Cost) { + if (Weight <= 0) continue; - unsigned Class = RPIdAndCost.first; int Limit = RegLimit[Class]; // Don't hoist cheap instructions if they would increase register pressure, @@ -1228,7 +1226,7 @@ bool MachineLICMImpl::CanCauseHighRegPressure( return true; for (const auto &RP : BackTrace) - if (static_cast<int>(RP[Class]) + RPIdAndCost.second >= Limit) + if (static_cast<int>(RP[Class]) + Weight >= Limit) return true; } @@ -1246,8 +1244,8 @@ void MachineLICMImpl::UpdateBackTraceRegPressure(const MachineInstr *MI) { // Update register pressure of blocks from loop header to current block. for (auto &RP : BackTrace) - for (const auto &RPIdAndCost : Cost) - RP[RPIdAndCost.first] += RPIdAndCost.second; + for (const auto &[Class, Weight] : Cost) + RP[Class] += Weight; } /// Return true if it is potentially profitable to hoist the given loop diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 310d35d..d2ea652 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -2476,16 +2476,17 @@ static bool canFoldInAddressingMode(SDNode *N, SDNode *Use, SelectionDAG &DAG, /// masked vector operation if the target supports it. static SDValue foldSelectWithIdentityConstant(SDNode *N, SelectionDAG &DAG, bool ShouldCommuteOperands) { - // Match a select as operand 1. The identity constant that we are looking for - // is only valid as operand 1 of a non-commutative binop. SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); + + // Match a select as operand 1. The identity constant that we are looking for + // is only valid as operand 1 of a non-commutative binop. if (ShouldCommuteOperands) std::swap(N0, N1); - unsigned SelOpcode = N1.getOpcode(); - if ((SelOpcode != ISD::VSELECT && SelOpcode != ISD::SELECT) || - !N1.hasOneUse()) + SDValue Cond, TVal, FVal; + if (!sd_match(N1, m_OneUse(m_SelectLike(m_Value(Cond), m_Value(TVal), + m_Value(FVal))))) return SDValue(); // We can't hoist all instructions because of immediate UB (not speculatable). @@ -2493,11 +2494,9 @@ static SDValue foldSelectWithIdentityConstant(SDNode *N, SelectionDAG &DAG, if (!DAG.isSafeToSpeculativelyExecuteNode(N)) return SDValue(); + unsigned SelOpcode = N1.getOpcode(); unsigned Opcode = N->getOpcode(); EVT VT = N->getValueType(0); - SDValue Cond = N1.getOperand(0); - SDValue TVal = N1.getOperand(1); - SDValue FVal = N1.getOperand(2); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); // This transform increases uses of N0, so freeze it to be safe. 
@@ -13856,12 +13855,11 @@ static SDValue tryToFoldExtendSelectLoad(SDNode *N, const TargetLowering &TLI, Opcode == ISD::ANY_EXTEND) && "Expected EXTEND dag node in input!"); - if (!(N0->getOpcode() == ISD::SELECT || N0->getOpcode() == ISD::VSELECT) || - !N0.hasOneUse()) + SDValue Cond, Op1, Op2; + if (!sd_match(N0, m_OneUse(m_SelectLike(m_Value(Cond), m_Value(Op1), + m_Value(Op2))))) return SDValue(); - SDValue Op1 = N0->getOperand(1); - SDValue Op2 = N0->getOperand(2); if (!isCompatibleLoad(Op1, Opcode) || !isCompatibleLoad(Op2, Opcode)) return SDValue(); @@ -13883,7 +13881,7 @@ static SDValue tryToFoldExtendSelectLoad(SDNode *N, const TargetLowering &TLI, SDValue Ext1 = DAG.getNode(Opcode, DL, VT, Op1); SDValue Ext2 = DAG.getNode(Opcode, DL, VT, Op2); - return DAG.getSelect(DL, VT, N0->getOperand(0), Ext1, Ext2); + return DAG.getSelect(DL, VT, Cond, Ext1, Ext2); } /// Try to fold a sext/zext/aext dag node into a ConstantSDNode or @@ -17462,8 +17460,8 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) { // fold (fsub (fpext (fneg (fmul, x, y))), z) // -> (fneg (fma (fpext x), (fpext y), z)) // Note: This could be removed with appropriate canonicalization of the - // input expression into (fneg (fadd (fpext (fmul, x, y)), z). However, the - // orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent + // input expression into (fneg (fadd (fpext (fmul, x, y)), z)). However, the + // command line flag -fp-contract=fast and fast-math flag contract prevent // from implementing the canonicalization in visitFSUB. if (matcher.match(N0, ISD::FP_EXTEND)) { SDValue N00 = N0.getOperand(0); @@ -17487,7 +17485,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) { // -> (fneg (fma (fpext x)), (fpext y), z) // Note: This could be removed with appropriate canonicalization of the // input expression into (fneg (fadd (fpext (fmul, x, y)), z). However, the - // orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent + // command line flag -fp-contract=fast and fast-math flag contract prevent // from implementing the canonicalization in visitFSUB. if (matcher.match(N0, ISD::FNEG)) { SDValue N00 = N0.getOperand(0); @@ -29620,13 +29618,14 @@ static SDValue takeInexpensiveLog2(SelectionDAG &DAG, const SDLoc &DL, EVT VT, } // c ? X : Y -> c ? 
Log2(X) : Log2(Y) - if ((Op.getOpcode() == ISD::SELECT || Op.getOpcode() == ISD::VSELECT) && - Op.hasOneUse()) { - if (SDValue LogX = takeInexpensiveLog2(DAG, DL, VT, Op.getOperand(1), - Depth + 1, AssumeNonZero)) - if (SDValue LogY = takeInexpensiveLog2(DAG, DL, VT, Op.getOperand(2), - Depth + 1, AssumeNonZero)) - return DAG.getSelect(DL, VT, Op.getOperand(0), LogX, LogY); + SDValue Cond, TVal, FVal; + if (sd_match(Op, m_OneUse(m_SelectLike(m_Value(Cond), m_Value(TVal), + m_Value(FVal))))) { + if (SDValue LogX = + takeInexpensiveLog2(DAG, DL, VT, TVal, Depth + 1, AssumeNonZero)) + if (SDValue LogY = + takeInexpensiveLog2(DAG, DL, VT, FVal, Depth + 1, AssumeNonZero)) + return DAG.getSelect(DL, VT, Cond, LogX, LogY); } // log2(umin(X, Y)) -> umin(log2(X), log2(Y)) diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 20a0efd..dcf2df3 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -1977,8 +1977,13 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) { if (const Instruction *Inst = dyn_cast<Instruction>(V)) { Register InReg = FuncInfo.InitializeRegForValue(Inst); + std::optional<CallingConv::ID> CallConv; + auto *CI = dyn_cast<CallInst>(Inst); + if (CI && !CI->isInlineAsm()) + CallConv = CI->getCallingConv(); + RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg, - Inst->getType(), std::nullopt); + Inst->getType(), CallConv); SDValue Chain = DAG.getEntryNode(); return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V); } diff --git a/llvm/lib/CodeGen/ShrinkWrap.cpp b/llvm/lib/CodeGen/ShrinkWrap.cpp index 826e412..8358105 100644 --- a/llvm/lib/CodeGen/ShrinkWrap.cpp +++ b/llvm/lib/CodeGen/ShrinkWrap.cpp @@ -319,7 +319,7 @@ bool ShrinkWrapImpl::useOrDefCSROrFI(const MachineInstr &MI, RegScavenger *RS, return isa<GlobalValue>(UO); } if (const PseudoSourceValue *PSV = Op->getPseudoValue()) - return PSV->isJumpTable(); + return PSV->isJumpTable() || PSV->isConstantPool(); return false; }; // Load/store operations may access the stack indirectly when we previously diff --git a/llvm/lib/CodeGen/TargetOptionsImpl.cpp b/llvm/lib/CodeGen/TargetOptionsImpl.cpp index 5eb86e7..049efe8 100644 --- a/llvm/lib/CodeGen/TargetOptionsImpl.cpp +++ b/llvm/lib/CodeGen/TargetOptionsImpl.cpp @@ -51,7 +51,7 @@ bool TargetOptions::FramePointerIsReserved(const MachineFunction &MF) const { /// HonorSignDependentRoundingFPMath - Return true if the codegen must assume /// that the rounding mode of the FPU can change from its default. 
bool TargetOptions::HonorSignDependentRoundingFPMath() const { - return !UnsafeFPMath && HonorSignDependentRoundingFPMathOption; + return HonorSignDependentRoundingFPMathOption; } /// NOTE: There are targets that still do not support the debug entry values diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp index 10f915d..b838e36 100644 --- a/llvm/lib/IR/AutoUpgrade.cpp +++ b/llvm/lib/IR/AutoUpgrade.cpp @@ -5262,33 +5262,47 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) { return; } + auto GetMaybeAlign = [](Value *Op) { + if (auto *CI = dyn_cast<ConstantInt>(Op)) { + uint64_t Val = CI->getZExtValue(); + if (Val == 0) + return MaybeAlign(); + if (isPowerOf2_64(Val)) + return MaybeAlign(Val); + } + reportFatalUsageError("Invalid alignment argument"); + }; + auto GetAlign = [&](Value *Op) { + MaybeAlign Align = GetMaybeAlign(Op); + if (Align) + return *Align; + reportFatalUsageError("Invalid zero alignment argument"); + }; + const DataLayout &DL = CI->getDataLayout(); switch (NewFn->getIntrinsicID()) { case Intrinsic::masked_load: NewCall = Builder.CreateMaskedLoad( - CI->getType(), CI->getArgOperand(0), - cast<ConstantInt>(CI->getArgOperand(1))->getAlignValue(), + CI->getType(), CI->getArgOperand(0), GetAlign(CI->getArgOperand(1)), CI->getArgOperand(2), CI->getArgOperand(3)); break; case Intrinsic::masked_gather: NewCall = Builder.CreateMaskedGather( CI->getType(), CI->getArgOperand(0), - DL.getValueOrABITypeAlignment( - cast<ConstantInt>(CI->getArgOperand(1))->getMaybeAlignValue(), - CI->getType()->getScalarType()), + DL.getValueOrABITypeAlignment(GetMaybeAlign(CI->getArgOperand(1)), + CI->getType()->getScalarType()), CI->getArgOperand(2), CI->getArgOperand(3)); break; case Intrinsic::masked_store: NewCall = Builder.CreateMaskedStore( CI->getArgOperand(0), CI->getArgOperand(1), - cast<ConstantInt>(CI->getArgOperand(2))->getAlignValue(), - CI->getArgOperand(3)); + GetAlign(CI->getArgOperand(2)), CI->getArgOperand(3)); break; case Intrinsic::masked_scatter: NewCall = Builder.CreateMaskedScatter( CI->getArgOperand(0), CI->getArgOperand(1), DL.getValueOrABITypeAlignment( - cast<ConstantInt>(CI->getArgOperand(2))->getMaybeAlignValue(), + GetMaybeAlign(CI->getArgOperand(2)), CI->getArgOperand(0)->getType()->getScalarType()), CI->getArgOperand(3)); break; @@ -6045,6 +6059,120 @@ void llvm::UpgradeFunctionAttributes(Function &F) { } } +// Set the string function attribute if it is not already present. +static void setFunctionAttrIfNotSet(Function &F, StringRef FnAttrName, + StringRef Value) { + if (!F.hasFnAttribute(FnAttrName)) + F.addFnAttr(FnAttrName, Value); +} + +// Add the function attribute if it is not present and Set is true. +// If the attribute's value is "false", remove the attribute. +// If its value is "true", reset it to a valueless attribute.
+static void ConvertFunctionAttr(Function &F, bool Set, StringRef FnAttrName) { + if (!F.hasFnAttribute(FnAttrName)) { + if (Set) + F.addFnAttr(FnAttrName); + } else { + auto A = F.getFnAttribute(FnAttrName); + if ("false" == A.getValueAsString()) + F.removeFnAttr(FnAttrName); + else if ("true" == A.getValueAsString()) { + F.removeFnAttr(FnAttrName); + F.addFnAttr(FnAttrName); + } + } +} + +void llvm::copyModuleAttrToFunctions(Module &M) { + Triple T(M.getTargetTriple()); + if (!T.isThumb() && !T.isARM() && !T.isAArch64()) + return; + + uint64_t BTEValue = 0; + uint64_t BPPLRValue = 0; + uint64_t GCSValue = 0; + uint64_t SRAValue = 0; + uint64_t SRAALLValue = 0; + uint64_t SRABKeyValue = 0; + + NamedMDNode *ModFlags = M.getModuleFlagsMetadata(); + if (ModFlags) { + for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) { + MDNode *Op = ModFlags->getOperand(I); + if (Op->getNumOperands() != 3) + continue; + + MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1)); + auto *CI = mdconst::dyn_extract<ConstantInt>(Op->getOperand(2)); + if (!ID || !CI) + continue; + + StringRef IDStr = ID->getString(); + uint64_t *ValPtr = IDStr == "branch-target-enforcement" ? &BTEValue + : IDStr == "branch-protection-pauth-lr" ? &BPPLRValue + : IDStr == "guarded-control-stack" ? &GCSValue + : IDStr == "sign-return-address" ? &SRAValue + : IDStr == "sign-return-address-all" ? &SRAALLValue + : IDStr == "sign-return-address-with-bkey" + ? &SRABKeyValue + : nullptr; + if (!ValPtr) + continue; + + *ValPtr = CI->getZExtValue(); + if (*ValPtr == 2) + return; + } + } + + bool BTE = BTEValue == 1; + bool BPPLR = BPPLRValue == 1; + bool GCS = GCSValue == 1; + bool SRA = SRAValue == 1; + + StringRef SignTypeValue = "non-leaf"; + if (SRA && SRAALLValue == 1) + SignTypeValue = "all"; + + StringRef SignKeyValue = "a_key"; + if (SRA && SRABKeyValue == 1) + SignKeyValue = "b_key"; + + for (Function &F : M.getFunctionList()) { + if (F.isDeclaration()) + continue; + + if (SRA) { + setFunctionAttrIfNotSet(F, "sign-return-address", SignTypeValue); + setFunctionAttrIfNotSet(F, "sign-return-address-key", SignKeyValue); + } else { + if (auto A = F.getFnAttribute("sign-return-address"); + A.isValid() && "none" == A.getValueAsString()) { + F.removeFnAttr("sign-return-address"); + F.removeFnAttr("sign-return-address-key"); + } + } + ConvertFunctionAttr(F, BTE, "branch-target-enforcement"); + ConvertFunctionAttr(F, BPPLR, "branch-protection-pauth-lr"); + ConvertFunctionAttr(F, GCS, "guarded-control-stack"); + } + + if (BTE) + M.setModuleFlag(llvm::Module::Min, "branch-target-enforcement", 2); + if (BPPLR) + M.setModuleFlag(llvm::Module::Min, "branch-protection-pauth-lr", 2); + if (GCS) + M.setModuleFlag(llvm::Module::Min, "guarded-control-stack", 2); + if (SRA) { + M.setModuleFlag(llvm::Module::Min, "sign-return-address", 2); + if (SRAALLValue == 1) + M.setModuleFlag(llvm::Module::Min, "sign-return-address-all", 2); + if (SRABKeyValue == 1) + M.setModuleFlag(llvm::Module::Min, "sign-return-address-with-bkey", 2); + } +} + static bool isOldLoopArgument(Metadata *MD) { auto *T = dyn_cast_or_null<MDTuple>(MD); if (!T) diff --git a/llvm/lib/Linker/IRMover.cpp b/llvm/lib/Linker/IRMover.cpp index 1bff6cd..f78d9b0 100644 --- a/llvm/lib/Linker/IRMover.cpp +++ b/llvm/lib/Linker/IRMover.cpp @@ -1512,6 +1512,11 @@ Error IRLinker::run() { // Loop over all of the linked values to compute type mappings. 
computeTypeMapping(); + // Convert module-level attributes to function-level attributes, because + // after merging modules the attributes might change and would then affect + // the functions differently than they did in the original module. + copyModuleAttrToFunctions(*SrcM); + std::reverse(Worklist.begin(), Worklist.end()); while (!Worklist.empty()) { GlobalValue *GV = Worklist.back(); @@ -1677,6 +1682,11 @@ IRMover::IRMover(Module &M) : Composite(M) { for (const auto *MD : StructTypes.getVisitedMetadata()) { SharedMDs[MD].reset(const_cast<MDNode *>(MD)); } + + // Convert module-level attributes to function-level attributes, because + // after merging modules the attributes might change and would then affect + // the functions differently than they did in the original module. + copyModuleAttrToFunctions(M); } Error IRMover::move(std::unique_ptr<Module> Src, diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp index e45cac8..048c58d 100644 --- a/llvm/lib/Passes/PassBuilder.cpp +++ b/llvm/lib/Passes/PassBuilder.cpp @@ -1095,6 +1095,31 @@ Expected<MemorySanitizerOptions> parseMSanPassOptions(StringRef Params) { return Result; } +Expected<AllocTokenOptions> parseAllocTokenPassOptions(StringRef Params) { + AllocTokenOptions Result; + while (!Params.empty()) { + StringRef ParamName; + std::tie(ParamName, Params) = Params.split(';'); + + if (ParamName.consume_front("mode=")) { + if (auto Mode = getAllocTokenModeFromString(ParamName)) + Result.Mode = *Mode; + else + return make_error<StringError>( + formatv("invalid argument to AllocToken pass mode " + "parameter: '{}'", + ParamName) + .str(), + inconvertibleErrorCode()); + } else { + return make_error<StringError>( + formatv("invalid AllocToken pass parameter '{}'", ParamName).str(), + inconvertibleErrorCode()); + } + } + return Result; +} + /// Parser of parameters for SimplifyCFG pass. Expected<SimplifyCFGOptions> parseSimplifyCFGOptions(StringRef Params) { SimplifyCFGOptions Result; diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def index 884d8da..a66b6e4 100644 --- a/llvm/lib/Passes/PassRegistry.def +++ b/llvm/lib/Passes/PassRegistry.def @@ -126,7 +126,6 @@ MODULE_PASS("openmp-opt", OpenMPOptPass()) MODULE_PASS("openmp-opt-postlink", OpenMPOptPass(ThinOrFullLTOPhase::FullLTOPostLink)) MODULE_PASS("partial-inliner", PartialInlinerPass()) -MODULE_PASS("alloc-token", AllocTokenPass()) MODULE_PASS("pgo-icall-prom", PGOIndirectCallPromotion()) MODULE_PASS("pgo-instr-gen", PGOInstrumentationGen()) MODULE_PASS("pgo-instr-use", PGOInstrumentationUse()) @@ -183,6 +182,10 @@ MODULE_PASS("wholeprogramdevirt", WholeProgramDevirtPass()) #define MODULE_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS) #endif MODULE_PASS_WITH_PARAMS( + "alloc-token", "AllocTokenPass", + [](AllocTokenOptions Opts) { return AllocTokenPass(Opts); }, + parseAllocTokenPassOptions, "mode=<mode>") +MODULE_PASS_WITH_PARAMS( "asan", "AddressSanitizerPass", [](AddressSanitizerOptions Opts) { return AddressSanitizerPass(Opts); }, parseASanPassOptions, "kernel;use-after-scope") diff --git a/llvm/lib/Support/AllocToken.cpp b/llvm/lib/Support/AllocToken.cpp new file mode 100644 index 0000000..8e9e89f --- /dev/null +++ b/llvm/lib/Support/AllocToken.cpp @@ -0,0 +1,61 @@ +//===- AllocToken.cpp - Allocation Token Calculation ----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Definition of AllocToken modes and shared calculation of stateless token IDs. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/AllocToken.h" +#include "llvm/ADT/StringSwitch.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/SipHash.h" + +using namespace llvm; + +std::optional<AllocTokenMode> +llvm::getAllocTokenModeFromString(StringRef Name) { + return StringSwitch<std::optional<AllocTokenMode>>(Name) + .Case("increment", AllocTokenMode::Increment) + .Case("random", AllocTokenMode::Random) + .Case("typehash", AllocTokenMode::TypeHash) + .Case("typehashpointersplit", AllocTokenMode::TypeHashPointerSplit) + .Default(std::nullopt); +} + +static uint64_t getStableHash(const AllocTokenMetadata &Metadata, + uint64_t MaxTokens) { + return getStableSipHash(Metadata.TypeName) % MaxTokens; +} + +std::optional<uint64_t> llvm::getAllocToken(AllocTokenMode Mode, + const AllocTokenMetadata &Metadata, + uint64_t MaxTokens) { + assert(MaxTokens && "Must provide non-zero max tokens"); + + switch (Mode) { + case AllocTokenMode::Increment: + case AllocTokenMode::Random: + // Stateful modes cannot be implemented as a pure function. + return std::nullopt; + + case AllocTokenMode::TypeHash: + return getStableHash(Metadata, MaxTokens); + + case AllocTokenMode::TypeHashPointerSplit: { + if (MaxTokens == 1) + return 0; + const uint64_t HalfTokens = MaxTokens / 2; + uint64_t Hash = getStableHash(Metadata, HalfTokens); + if (Metadata.ContainsPointer) + Hash += HalfTokens; + return Hash; + } + } + + llvm_unreachable(""); +} diff --git a/llvm/lib/Support/CMakeLists.txt b/llvm/lib/Support/CMakeLists.txt index 42b21b5..671a5fe 100644 --- a/llvm/lib/Support/CMakeLists.txt +++ b/llvm/lib/Support/CMakeLists.txt @@ -149,6 +149,7 @@ add_llvm_component_library(LLVMSupport AArch64BuildAttributes.cpp ARMAttributeParser.cpp ARMWinEH.cpp + AllocToken.cpp Allocator.cpp AutoConvert.cpp Base64.cpp diff --git a/llvm/lib/Support/SpecialCaseList.cpp b/llvm/lib/Support/SpecialCaseList.cpp index 549c418..f74e52a 100644 --- a/llvm/lib/Support/SpecialCaseList.cpp +++ b/llvm/lib/Support/SpecialCaseList.cpp @@ -111,7 +111,7 @@ Error SpecialCaseList::Matcher::insert(StringRef Pattern, unsigned LineNumber) { return std::visit([&](auto &V) { return V.insert(Pattern, LineNumber); }, M); } -LLVM_ABI void SpecialCaseList::Matcher::preprocess(bool BySize) { +void SpecialCaseList::Matcher::preprocess(bool BySize) { return std::visit([&](auto &V) { return V.preprocess(BySize); }, M); } diff --git a/llvm/lib/Target/AArch64/AArch64InstrAtomics.td b/llvm/lib/Target/AArch64/AArch64InstrAtomics.td index 31fcd63..5d9215d 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrAtomics.td +++ b/llvm/lib/Target/AArch64/AArch64InstrAtomics.td @@ -136,8 +136,8 @@ def : Pat<(f32 (bitconvert (i32 (relaxed_load<atomic_load_nonext_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend))))), (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>; def : Pat<(f32 (bitconvert (i32 (relaxed_load<atomic_load_nonext_32> - (am_indexed32 GPR64sp:$Rn, uimm12s8:$offset))))), - (LDRSui GPR64sp:$Rn, uimm12s8:$offset)>; + (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))), + (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>; def : Pat<(f32 (bitconvert (i32 (relaxed_load<atomic_load_nonext_32> (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))), (LDURSi 
GPR64sp:$Rn, simm9:$offset)>; @@ -236,11 +236,11 @@ def : Pat<(relaxed_store<atomic_store_32> def : Pat<(releasing_store<atomic_store_64> GPR64sp:$ptr, GPR64:$val), (STLRX GPR64:$val, GPR64sp:$ptr)>; def : Pat<(relaxed_store<atomic_store_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm, - ro_Wextend16:$extend), + ro_Wextend64:$extend), GPR64:$val), (STRXroW GPR64:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>; def : Pat<(relaxed_store<atomic_store_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm, - ro_Xextend16:$extend), + ro_Xextend64:$extend), GPR64:$val), (STRXroX GPR64:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>; def : Pat<(relaxed_store<atomic_store_64> @@ -276,8 +276,8 @@ def : Pat<(relaxed_store<atomic_store_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm, (i64 (bitconvert (f64 FPR64Op:$val)))), (STRDroX FPR64Op:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>; def : Pat<(relaxed_store<atomic_store_64> - (am_indexed64 GPR64sp:$Rn, uimm12s4:$offset), (i64 (bitconvert (f64 FPR64Op:$val)))), - (STRDui FPR64Op:$val, GPR64sp:$Rn, uimm12s4:$offset)>; + (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset), (i64 (bitconvert (f64 FPR64Op:$val)))), + (STRDui FPR64Op:$val, GPR64sp:$Rn, uimm12s8:$offset)>; def : Pat<(relaxed_store<atomic_store_64> (am_unscaled64 GPR64sp:$Rn, simm9:$offset), (i64 (bitconvert (f64 FPR64Op:$val)))), (STURDi FPR64Op:$val, GPR64sp:$Rn, simm9:$offset)>; diff --git a/llvm/lib/Target/AArch64/AArch64InstrGISel.td b/llvm/lib/Target/AArch64/AArch64InstrGISel.td index fe84193..30b7b03 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrGISel.td +++ b/llvm/lib/Target/AArch64/AArch64InstrGISel.td @@ -507,7 +507,7 @@ let AddedComplexity = 19 in { defm : VecROStoreLane64_0Pat<ro32, store, v2i32, i32, ssub, STRSroW, STRSroX>; } -def : Pat<(v8i8 (AArch64dup (i8 (load (am_indexed8 GPR64sp:$Rn))))), +def : Pat<(v8i8 (AArch64dup (i8 (load GPR64sp:$Rn)))), (LD1Rv8b GPR64sp:$Rn)>; def : Pat<(v16i8 (AArch64dup (i8 (load GPR64sp:$Rn)))), (LD1Rv16b GPR64sp:$Rn)>; diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.td b/llvm/lib/Target/AArch64/AArch64RegisterInfo.td index ef974df..47144c7 100644 --- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.td +++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.td @@ -993,7 +993,7 @@ def PPR_3b : PPRClass<0, 7> { // Restricted 3 bit SVE predicate register class. let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::PPRRegClassID, 0, 8>"; } def PPR_p8to15 : PPRClass<8, 15> { - let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::PNRRegClassID, 8, 8>"; + let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::PPRRegClassID, 8, 8>"; } def PPRMul2 : PPRClass<0, 14, 2>; diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp index 1f773e2..3368a50 100644 --- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp @@ -820,7 +820,7 @@ void ARMAsmPrinter::emitAttributes() { auto *BTIValue = mdconst::extract_or_null<ConstantInt>( SourceModule->getModuleFlag("branch-target-enforcement")); - if (BTIValue && BTIValue->isOne()) { + if (BTIValue && !BTIValue->isZero()) { // If "+pacbti" is used as an architecture extension, // Tag_BTI_extension is emitted in // ARMTargetStreamer::emitTargetAttributes(). 
diff --git a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp index 406f4c1..597d311 100644 --- a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp +++ b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp @@ -1036,6 +1036,7 @@ bool LowOverheadLoop::ValidateLiveOuts() { while (!Worklist.empty()) { MachineInstr *MI = Worklist.pop_back_val(); if (MI->getOpcode() == ARM::MQPRCopy) { + LLVM_DEBUG(dbgs() << " Must generate copy as VMOV: " << *MI); VMOVCopies.insert(MI); MachineInstr *CopySrc = RDI.getUniqueReachingMIDef(MI, MI->getOperand(1).getReg()); @@ -1045,6 +1046,20 @@ bool LowOverheadLoop::ValidateLiveOuts() { LLVM_DEBUG(dbgs() << " Unable to handle live out: " << *MI); VMOVCopies.clear(); return false; + } else if (isVectorPredicated(MI)) { + // If this is a predicated instruction with merging semantics, + // check where it gets its false lanes from, if any. + int InactiveIdx = findVPTInactiveOperandIdx(*MI); + if (InactiveIdx != -1) { + SmallPtrSet<MachineInstr *, 2> Defs; + MachineInstr *FalseSrc = RDI.getUniqueReachingMIDef( + MI, MI->getOperand(InactiveIdx).getReg()); + if (FalseSrc) { + LLVM_DEBUG(dbgs() + << " Must check source of false lanes for: " << *MI); + Worklist.push_back(FalseSrc); + } + } } } diff --git a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp index 431ce38..f5653d4 100644 --- a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp +++ b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp @@ -805,6 +805,16 @@ int llvm::findFirstVPTPredOperandIdx(const MachineInstr &MI) { return -1; } +int llvm::findVPTInactiveOperandIdx(const MachineInstr &MI) { + const MCInstrDesc &MCID = MI.getDesc(); + + for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) + if (MCID.operands()[i].OperandType == ARM::OPERAND_VPRED_R) + return i + ARM::SUBOP_vpred_r_inactive; + + return -1; +} + ARMVCC::VPTCodes llvm::getVPTInstrPredicate(const MachineInstr &MI, Register &PredReg) { int PIdx = findFirstVPTPredOperandIdx(MI); diff --git a/llvm/lib/Target/ARM/Thumb2InstrInfo.h b/llvm/lib/Target/ARM/Thumb2InstrInfo.h index 3ec3a621..1b0bf2d 100644 --- a/llvm/lib/Target/ARM/Thumb2InstrInfo.h +++ b/llvm/lib/Target/ARM/Thumb2InstrInfo.h @@ -90,6 +90,9 @@ inline ARMVCC::VPTCodes getVPTInstrPredicate(const MachineInstr &MI) { Register PredReg; return getVPTInstrPredicate(MI, PredReg); } +// Identify the input operand in an MVE predicated instruction which +// contributes the values of any inactive vector lanes. +int findVPTInactiveOperandIdx(const MachineInstr &MI); // Recomputes the Block Mask of Instr, a VPT or VPST instruction. 
// This rebuilds the block mask of the instruction depending on the predicates diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index d477522..17f04d0 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -14736,8 +14736,8 @@ SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG, } unsigned PPCTargetLowering::combineRepeatedFPDivisors() const { - // Note: This functionality is used only when unsafe-fp-math is enabled, and - // on cores with reciprocal estimates (which are used when unsafe-fp-math is + // Note: This functionality is used only when arcp is enabled, and + // on cores with reciprocal estimates (which are used when arcp is // enabled for division), this functionality is redundant with the default // combiner logic (once the division -> reciprocal/multiply transformation // has taken place). As a result, this matters more for older cores than for diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp index e857b2d..edde7ac 100644 --- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp +++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp @@ -2406,7 +2406,8 @@ ParseStatus RISCVAsmParser::parseVTypeI(OperandVector &Operands) { } bool RISCVAsmParser::generateVTypeError(SMLoc ErrorLoc) { - if (STI->hasFeature(RISCV::FeatureStdExtZvfbfa)) + if (STI->hasFeature(RISCV::FeatureStdExtZvfbfa) || + STI->hasFeature(RISCV::FeatureVendorXSfvfbfexp16e)) return Error( ErrorLoc, "operand must be " diff --git a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp index b8ec0bb..4bea4c4 100644 --- a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp +++ b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp @@ -654,7 +654,10 @@ static constexpr FeatureBitset XqciFeatureGroup = { static constexpr FeatureBitset XSfVectorGroup = { RISCV::FeatureVendorXSfvcp, RISCV::FeatureVendorXSfvqmaccdod, RISCV::FeatureVendorXSfvqmaccqoq, RISCV::FeatureVendorXSfvfwmaccqqq, - RISCV::FeatureVendorXSfvfnrclipxfqf, RISCV::FeatureVendorXSfmmbase}; + RISCV::FeatureVendorXSfvfnrclipxfqf, RISCV::FeatureVendorXSfmmbase, + RISCV::FeatureVendorXSfvfexpa, RISCV::FeatureVendorXSfvfexpa64e, + RISCV::FeatureVendorXSfvfbfexp16e, RISCV::FeatureVendorXSfvfexp16e, + RISCV::FeatureVendorXSfvfexp32e}; static constexpr FeatureBitset XSfSystemGroup = { RISCV::FeatureVendorXSiFivecdiscarddlone, RISCV::FeatureVendorXSiFivecflushdlone, diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp index 50f5a5d..7b9c4b3 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp @@ -220,7 +220,8 @@ void RISCVInstPrinter::printVTypeI(const MCInst *MI, unsigned OpNo, if (RISCVVType::getVLMUL(Imm) == RISCVVType::VLMUL::LMUL_RESERVED || RISCVVType::getSEW(Imm) > 64 || (RISCVVType::isAltFmt(Imm) && - !STI.hasFeature(RISCV::FeatureStdExtZvfbfa)) || + !(STI.hasFeature(RISCV::FeatureStdExtZvfbfa) || + STI.hasFeature(RISCV::FeatureVendorXSfvfbfexp16e))) || (Imm >> 9) != 0) { O << formatImm(Imm); return; diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td index 3abbbb3..9e6b7f0 100644 --- a/llvm/lib/Target/RISCV/RISCVFeatures.td +++ b/llvm/lib/Target/RISCV/RISCVFeatures.td @@ -1335,6 +1335,44 @@ def 
HasVendorXSfvfnrclipxfqf AssemblerPredicate<(all_of FeatureVendorXSfvfnrclipxfqf), "'XSfvfnrclipxfqf' (SiFive FP32-to-int8 Ranged Clip Instructions)">; +// Note: XSfvfbfexp16e depends on either Zvfbfmin _or_ Zvfbfa, which cannot be expressed here in +// TableGen. Instead, we check that in RISCVISAInfo. +def FeatureVendorXSfvfbfexp16e + : RISCVExtension<0, 5, + "SiFive Vector Floating-Point Exponential Function Instruction, BFloat16">; +def HasVendorXSfvfbfexp16e : Predicate<"Subtarget->hasVendorXSfvfbfexp16e()">; + +def FeatureVendorXSfvfexp16e + : RISCVExtension<0, 5, + "SiFive Vector Floating-Point Exponential Function Instruction, Half Precision", + [FeatureStdExtZvfh]>; +def HasVendorXSfvfexp16e : Predicate<"Subtarget->hasVendorXSfvfexp16e()">; + +def FeatureVendorXSfvfexp32e + : RISCVExtension<0, 5, + "SiFive Vector Floating-Point Exponential Function Instruction, Single Precision", + [FeatureStdExtZve32f]>; +def HasVendorXSfvfexp32e : Predicate<"Subtarget->hasVendorXSfvfexp32e()">; + +def HasVendorXSfvfexpAnyFloat : Predicate<"Subtarget->hasVendorXSfvfexp16e() || Subtarget->hasVendorXSfvfexp32e()">; +def HasVendorXSfvfexpAny : Predicate<"Subtarget->hasVendorXSfvfbfexp16e() || Subtarget->hasVendorXSfvfexp16e() || Subtarget->hasVendorXSfvfexp32e()">, + AssemblerPredicate<(any_of FeatureVendorXSfvfbfexp16e, FeatureVendorXSfvfexp16e, FeatureVendorXSfvfexp32e), + "'Xsfvfbfexp16e', 'Xsfvfexp16e', or 'Xsfvfexp32e' (SiFive Vector Floating-Point Exponential Function Instruction)">; + +def FeatureVendorXSfvfexpa + : RISCVExtension<0, 2, + "SiFive Vector Floating-Point Exponential Approximation Instruction", + [FeatureStdExtZve32f]>; +def HasVendorXSfvfexpa : Predicate<"Subtarget->hasVendorXSfvfexpa()">, + AssemblerPredicate<(all_of FeatureVendorXSfvfexpa), + "'Xsfvfexpa' (SiFive Vector Floating-Point Exponential Approximation Instruction)">; + +def FeatureVendorXSfvfexpa64e + : RISCVExtension<0, 2, + "SiFive Vector Floating-Point Exponential Approximation Instruction with Double-Precision", + [FeatureVendorXSfvfexpa, FeatureStdExtZve64d]>; +def HasVendorXSfvfexpa64e : Predicate<"Subtarget->hasVendorXSfvfexpa64e()">; + def FeatureVendorXSiFivecdiscarddlone : RISCVExtension<1, 0, "SiFive sf.cdiscard.d.l1 Instruction", []>; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp index 12f776b..912b82d 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -1689,42 +1689,44 @@ bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp, // instruction opcode. Otherwise, return RISCV::INSTRUCTION_LIST_END. // TODO: Support more operations. 
unsigned getPredicatedOpcode(unsigned Opcode) { + // clang-format off switch (Opcode) { - case RISCV::ADD: return RISCV::PseudoCCADD; break; - case RISCV::SUB: return RISCV::PseudoCCSUB; break; - case RISCV::SLL: return RISCV::PseudoCCSLL; break; - case RISCV::SRL: return RISCV::PseudoCCSRL; break; - case RISCV::SRA: return RISCV::PseudoCCSRA; break; - case RISCV::AND: return RISCV::PseudoCCAND; break; - case RISCV::OR: return RISCV::PseudoCCOR; break; - case RISCV::XOR: return RISCV::PseudoCCXOR; break; - - case RISCV::ADDI: return RISCV::PseudoCCADDI; break; - case RISCV::SLLI: return RISCV::PseudoCCSLLI; break; - case RISCV::SRLI: return RISCV::PseudoCCSRLI; break; - case RISCV::SRAI: return RISCV::PseudoCCSRAI; break; - case RISCV::ANDI: return RISCV::PseudoCCANDI; break; - case RISCV::ORI: return RISCV::PseudoCCORI; break; - case RISCV::XORI: return RISCV::PseudoCCXORI; break; - - case RISCV::ADDW: return RISCV::PseudoCCADDW; break; - case RISCV::SUBW: return RISCV::PseudoCCSUBW; break; - case RISCV::SLLW: return RISCV::PseudoCCSLLW; break; - case RISCV::SRLW: return RISCV::PseudoCCSRLW; break; - case RISCV::SRAW: return RISCV::PseudoCCSRAW; break; - - case RISCV::ADDIW: return RISCV::PseudoCCADDIW; break; - case RISCV::SLLIW: return RISCV::PseudoCCSLLIW; break; - case RISCV::SRLIW: return RISCV::PseudoCCSRLIW; break; - case RISCV::SRAIW: return RISCV::PseudoCCSRAIW; break; - - case RISCV::ANDN: return RISCV::PseudoCCANDN; break; - case RISCV::ORN: return RISCV::PseudoCCORN; break; - case RISCV::XNOR: return RISCV::PseudoCCXNOR; break; - - case RISCV::NDS_BFOS: return RISCV::PseudoCCNDS_BFOS; break; - case RISCV::NDS_BFOZ: return RISCV::PseudoCCNDS_BFOZ; break; + case RISCV::ADD: return RISCV::PseudoCCADD; + case RISCV::SUB: return RISCV::PseudoCCSUB; + case RISCV::SLL: return RISCV::PseudoCCSLL; + case RISCV::SRL: return RISCV::PseudoCCSRL; + case RISCV::SRA: return RISCV::PseudoCCSRA; + case RISCV::AND: return RISCV::PseudoCCAND; + case RISCV::OR: return RISCV::PseudoCCOR; + case RISCV::XOR: return RISCV::PseudoCCXOR; + + case RISCV::ADDI: return RISCV::PseudoCCADDI; + case RISCV::SLLI: return RISCV::PseudoCCSLLI; + case RISCV::SRLI: return RISCV::PseudoCCSRLI; + case RISCV::SRAI: return RISCV::PseudoCCSRAI; + case RISCV::ANDI: return RISCV::PseudoCCANDI; + case RISCV::ORI: return RISCV::PseudoCCORI; + case RISCV::XORI: return RISCV::PseudoCCXORI; + + case RISCV::ADDW: return RISCV::PseudoCCADDW; + case RISCV::SUBW: return RISCV::PseudoCCSUBW; + case RISCV::SLLW: return RISCV::PseudoCCSLLW; + case RISCV::SRLW: return RISCV::PseudoCCSRLW; + case RISCV::SRAW: return RISCV::PseudoCCSRAW; + + case RISCV::ADDIW: return RISCV::PseudoCCADDIW; + case RISCV::SLLIW: return RISCV::PseudoCCSLLIW; + case RISCV::SRLIW: return RISCV::PseudoCCSRLIW; + case RISCV::SRAIW: return RISCV::PseudoCCSRAIW; + + case RISCV::ANDN: return RISCV::PseudoCCANDN; + case RISCV::ORN: return RISCV::PseudoCCORN; + case RISCV::XNOR: return RISCV::PseudoCCXNOR; + + case RISCV::NDS_BFOS: return RISCV::PseudoCCNDS_BFOS; + case RISCV::NDS_BFOZ: return RISCV::PseudoCCNDS_BFOZ; } + // clang-format on return RISCV::INSTRUCTION_LIST_END; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td index 6a4119a..4104abd 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td @@ -217,6 +217,14 @@ let Predicates = [HasVendorXSfvcp], mayLoad = 0, mayStore = 0, defm FVW : CustomSiFiveVCIX<"fvw", VCIX_XVW, VR, VR, FPR32>, Sched<[]>; } +let 
Predicates = [HasVendorXSfvfexpAny], DecoderNamespace = "XSfvector" in { + def SF_VFEXP_V : VALUVs2<0b010011, 0b00111, OPFVV, "sf.vfexp.v">; +} + +let Predicates = [HasVendorXSfvfexpa], DecoderNamespace = "XSfvector" in { + def SF_VFEXPA_V : VALUVs2<0b010011, 0b00110, OPFVV, "sf.vfexpa.v">; +} + let Predicates = [HasVendorXSfvqmaccdod], DecoderNamespace = "XSfvector", DestEEW = EEWSEWx4, RVVConstraint=VS2Constraint in { def SF_VQMACCU_2x8x2 : CustomSiFiveVMACC<0b101100, OPMVV, "sf.vqmaccu.2x8x2">; diff --git a/llvm/lib/Target/TargetMachine.cpp b/llvm/lib/Target/TargetMachine.cpp index cf85691..9bda8a4 100644 --- a/llvm/lib/Target/TargetMachine.cpp +++ b/llvm/lib/Target/TargetMachine.cpp @@ -158,7 +158,6 @@ void TargetMachine::resetTargetOptions(const Function &F) const { Options.X = F.getFnAttribute(Y).getValueAsBool(); \ } while (0) - RESET_OPTION(UnsafeFPMath, "unsafe-fp-math"); RESET_OPTION(NoInfsFPMath, "no-infs-fp-math"); RESET_OPTION(NoNaNsFPMath, "no-nans-fp-math"); RESET_OPTION(NoSignedZerosFPMath, "no-signed-zeros-fp-math"); diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp index f973949..7ec463b 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -183,6 +183,11 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering( for (auto T : {MVT::i32, MVT::i64}) setOperationAction(Op, T, Custom); + if (Subtarget->hasRelaxedSIMD()) { + setOperationAction( + {ISD::FMINNUM, ISD::FMINIMUMNUM, ISD::FMAXNUM, ISD::FMAXIMUMNUM}, + {MVT::v4f32, MVT::v2f64}, Legal); + } // SIMD-specific configuration if (Subtarget->hasSIMD128()) { diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td index 7840620..f0ac26b 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td @@ -1742,6 +1742,23 @@ defm SIMD_RELAXED_FMIN : defm SIMD_RELAXED_FMAX : RelaxedBinary<F64x2, int_wasm_relaxed_max, "relaxed_max", 0x110>; +let Predicates = [HasRelaxedSIMD] in { + foreach vec = [F32x4, F64x2] in { + defvar relaxed_min = !cast<NI>("SIMD_RELAXED_FMIN_"#vec); + defvar relaxed_max = !cast<NI>("SIMD_RELAXED_FMAX_"#vec); + + // Transform standard fminimum/fmaximum to relaxed versions + def : Pat<(vec.vt (fminnum (vec.vt V128:$lhs), (vec.vt V128:$rhs))), + (relaxed_min V128:$lhs, V128:$rhs)>; + def : Pat<(vec.vt (fminimumnum (vec.vt V128:$lhs), (vec.vt V128:$rhs))), + (relaxed_min V128:$lhs, V128:$rhs)>; + def : Pat<(vec.vt (fmaxnum (vec.vt V128:$lhs), (vec.vt V128:$rhs))), + (relaxed_max V128:$lhs, V128:$rhs)>; + def : Pat<(vec.vt (fmaximumnum (vec.vt V128:$lhs), (vec.vt V128:$rhs))), + (relaxed_max V128:$lhs, V128:$rhs)>; + } +} + //===----------------------------------------------------------------------===// // Relaxed rounding q15 multiplication //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index b5f8ee5..d49f25a 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -20558,7 +20558,7 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, const SDLoc &DL, // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f); // NOTE: By using fsub of a positive constant instead of fadd of a negative - // constant, we avoid reassociation in MachineCombiner when unsafe-fp-math is + // 
constant, we avoid reassociation in MachineCombiner when reassoc is // enabled. See PR24512. SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High); // TODO: Are there any fast-math-flags to propagate here? @@ -29516,11 +29516,8 @@ static SDValue LowerFMINIMUM_FMAXIMUM(SDValue Op, const X86Subtarget &Subtarget, if (IgnoreNaN || DAG.isKnownNeverNaN(IsNum ? NewY : NewX)) return MinMax; - if (DAG.isKnownNeverNaN(NewX)) - NewX = NewY; - - SDValue IsNaN = - DAG.getSetCC(DL, SetCCType, NewX, NewX, IsNum ? ISD::SETO : ISD::SETUO); + SDValue NaNSrc = IsNum ? MinMax : NewX; + SDValue IsNaN = DAG.getSetCC(DL, SetCCType, NaNSrc, NaNSrc, ISD::SETUO); return DAG.getSelect(DL, VT, IsNaN, NewX, MinMax); } diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td index 83bd6ac..1b748b7 100644 --- a/llvm/lib/Target/X86/X86InstrAVX512.td +++ b/llvm/lib/Target/X86/X86InstrAVX512.td @@ -5519,7 +5519,7 @@ defm VMIN : avx512_binop_s_sae<0x5D, "vmin", X86any_fmin, X86fmins, X86fminSAEs, defm VMAX : avx512_binop_s_sae<0x5F, "vmax", X86any_fmax, X86fmaxs, X86fmaxSAEs, SchedWriteFCmpSizes, 0>; -// MIN/MAX nodes are commutable under "unsafe-fp-math". In this case we use +// MIN/MAX nodes are commutable under (nnan + ninf). In this case we use // X86fminc and X86fmaxc instead of X86fmin and X86fmax multiclass avx512_comutable_binop_s<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, SDNode OpNode, diff --git a/llvm/lib/Target/X86/X86ScheduleZnver4.td b/llvm/lib/Target/X86/X86ScheduleZnver4.td index cc30054..ac4d31d 100644 --- a/llvm/lib/Target/X86/X86ScheduleZnver4.td +++ b/llvm/lib/Target/X86/X86ScheduleZnver4.td @@ -15,7 +15,7 @@ //===----------------------------------------------------------------------===// def Znver4Model : SchedMachineModel { - // AMD SOG Zen4, 2.9.6 Dispatch + // AMD SOG Zen4, 2.9.8 Dispatch // The processor may dispatch up to 6 macro ops per cycle // into the execution engine. let IssueWidth = 6; @@ -46,8 +46,9 @@ def Znver4Model : SchedMachineModel { int VecLoadLatency = 7; // Latency of a simple store operation. int StoreLatency = 1; - // FIXME: - let HighLatency = 25; // FIXME: any better choice? + // Mean and median value for all instructions with latencies >6 + // Source: Zen4 Instruction Latencies spreadsheet (included with SOG) + let HighLatency = 13; // AMD SOG Zen4, 2.8 Optimizing Branching // The branch misprediction penalty is in the range from 11 to 18 cycles, // <...>. The common case penalty is 13 cycles. 
@@ -612,6 +613,7 @@ def Zn4WriteLEA : SchedWriteVariant<[ def : InstRW<[Zn4WriteLEA], (instrs LEA32r, LEA64r, LEA64_32r)>; +// values from uops.info def Zn4SlowLEA16r : SchedWriteRes<[Zn4ALU0123]> { let Latency = 2; // FIXME: not from llvm-exegesis let ReleaseAtCycles = [4]; @@ -659,15 +661,15 @@ def : InstRW<[Zn4WriteCMPXCHG8rm_LCMPXCHG8], (instrs CMPXCHG8rm, LCMPXCHG8)>; def Zn4WriteCMPXCHG8B : SchedWriteRes<[Zn4ALU0123]> { let Latency = 3; // FIXME: not from llvm-exegesis - let ReleaseAtCycles = [24]; - let NumMicroOps = 19; + let ReleaseAtCycles = [20]; + let NumMicroOps = 15; } def : InstRW<[Zn4WriteCMPXCHG8B], (instrs CMPXCHG8B)>; def Zn4WriteCMPXCHG16B_LCMPXCHG16B : SchedWriteRes<[Zn4ALU0123]> { - let Latency = 4; // FIXME: not from llvm-exegesis - let ReleaseAtCycles = [59]; - let NumMicroOps = 28; + let Latency = 2; // FIXME: not from llvm-exegesis + let ReleaseAtCycles = [40]; + let NumMicroOps = 26; } def : InstRW<[Zn4WriteCMPXCHG16B_LCMPXCHG16B], (instrs CMPXCHG16B, LCMPXCHG16B)>; @@ -681,7 +683,7 @@ def : InstRW<[Zn4WriteWriteXCHGUnrenameable], (instrs XCHG8rr, XCHG16rr, XCHG16a def Zn4WriteXCHG8rm_XCHG16rm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4ALU0123]> { let Latency = !add(Znver4Model.LoadLatency, 3); // FIXME: not from llvm-exegesis let ReleaseAtCycles = [1, 1, 2]; - let NumMicroOps = 5; + let NumMicroOps = 2; } def : InstRW<[Zn4WriteXCHG8rm_XCHG16rm], (instrs XCHG8rm, XCHG16rm)>; @@ -693,19 +695,17 @@ def Zn4WriteXCHG32rm_XCHG64rm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4ALU0123]> def : InstRW<[Zn4WriteXCHG32rm_XCHG64rm], (instrs XCHG32rm, XCHG64rm)>; // Integer division. -// FIXME: uops for 8-bit division measures as 2. for others it's a guess. -// FIXME: latency for 8-bit division measures as 10. for others it's a guess. -defm : Zn4WriteResIntPair<WriteDiv8, [Zn4Divider], 10, [10], 2>; -defm : Zn4WriteResIntPair<WriteDiv16, [Zn4Divider], 11, [11], 2>; -defm : Zn4WriteResIntPair<WriteDiv32, [Zn4Divider], 13, [13], 2>; -defm : Zn4WriteResIntPair<WriteDiv64, [Zn4Divider], 17, [17], 2>; -defm : Zn4WriteResIntPair<WriteIDiv8, [Zn4Divider], 10, [10], 2>; -defm : Zn4WriteResIntPair<WriteIDiv16, [Zn4Divider], 11, [11], 2>; -defm : Zn4WriteResIntPair<WriteIDiv32, [Zn4Divider], 13, [13], 2>; -defm : Zn4WriteResIntPair<WriteIDiv64, [Zn4Divider], 17, [17], 2>; - -defm : Zn4WriteResIntPair<WriteBSF, [Zn4ALU1], 1, [1], 6, /*LoadUOps=*/1>; // Bit scan forward. -defm : Zn4WriteResIntPair<WriteBSR, [Zn4ALU1], 1, [1], 6, /*LoadUOps=*/1>; // Bit scan reverse. +defm : Zn4WriteResIntPair<WriteDiv8, [Zn4Divider], 9, [9], 2>; +defm : Zn4WriteResIntPair<WriteDiv16, [Zn4Divider], 10, [10], 2>; +defm : Zn4WriteResIntPair<WriteDiv32, [Zn4Divider], 12, [12], 2>; +defm : Zn4WriteResIntPair<WriteDiv64, [Zn4Divider], 18, [18], 2>; +defm : Zn4WriteResIntPair<WriteIDiv8, [Zn4Divider], 9, [9], 2>; +defm : Zn4WriteResIntPair<WriteIDiv16, [Zn4Divider], 10, [10], 2>; +defm : Zn4WriteResIntPair<WriteIDiv32, [Zn4Divider], 12, [12], 2>; +defm : Zn4WriteResIntPair<WriteIDiv64, [Zn4Divider], 18, [18], 2>; + +defm : Zn4WriteResIntPair<WriteBSF, [Zn4ALU1], 1, [1], 1, /*LoadUOps=*/1>; // Bit scan forward. +defm : Zn4WriteResIntPair<WriteBSR, [Zn4ALU1], 1, [1], 1, /*LoadUOps=*/1>; // Bit scan reverse. defm : Zn4WriteResIntPair<WritePOPCNT, [Zn4ALU0123], 1, [1], 1>; // Bit population count. @@ -725,12 +725,12 @@ def Zn4WriteLZCNT16rr : SchedWriteRes<[Zn4ALU0123]> { } def : InstRW<[Zn4WriteLZCNT16rr], (instrs LZCNT16rr)>; -defm : Zn4WriteResIntPair<WriteTZCNT, [Zn4ALU12], 2, [1], 2>; // Trailing zero count. 
+defm : Zn4WriteResIntPair<WriteTZCNT, [Zn4ALU12], 1, [1], 1>; // Trailing zero count. def Zn4WriteTZCNT16rr : SchedWriteRes<[Zn4ALU0123]> { - let Latency = 2; - let ReleaseAtCycles = [4]; - let NumMicroOps = 2; + let Latency = 1; + let ReleaseAtCycles = [1]; + let NumMicroOps = 1; } def : InstRW<[Zn4WriteTZCNT16rr], (instrs TZCNT16rr)>; @@ -1109,15 +1109,31 @@ def Zn4WriteVecOpMaskKRMov : SchedWriteRes<[Zn4FPOpMask4]> { } def : InstRW<[Zn4WriteVecOpMaskKRMov], (instrs KMOVBkr, KMOVDkr, KMOVQkr, KMOVWkr)>; -def Zn4WriteVecALU2Slow : SchedWriteRes<[Zn4FPVAdd12]> { - // TODO: All align instructions are expected to be of 4 cycle latency - let Latency = 4; +// 128-bit VALIGN +def Zn4WriteXMMVecALU2Slow : SchedWriteRes<[Zn4FPVAdd12]> { + let Latency = 2; let ReleaseAtCycles = [1]; let NumMicroOps = 1; } -def : InstRW<[Zn4WriteVecALU2Slow], (instrs VALIGNDZrri, VALIGNDZ128rri, VALIGNDZ256rri, - VALIGNQZrri, VALIGNQZ128rri, VALIGNQZ256rri) - >; + +// 256-bit VALIGN +def Zn4WriteYMMVecALU2Slow : SchedWriteRes<[Zn4FPVAdd12]> { + let Latency = 3; + let ReleaseAtCycles = [1]; + let NumMicroOps = 1; +} + +// 512-bit VALIGN +def Zn4WriteZMMVecALU2Slow : SchedWriteRes<[Zn4FPVAdd12]> { + let Latency = 4; + let ReleaseAtCycles = [2]; + let NumMicroOps = 1; +} + +def : InstRW<[Zn4WriteXMMVecALU2Slow], (instrs VALIGNDZrri, VALIGNQZrri)>; +def : InstRW<[Zn4WriteYMMVecALU2Slow], (instrs VALIGNDZ128rri, VALIGNQZ128rri)>; +def : InstRW<[Zn4WriteZMMVecALU2Slow], (instrs VALIGNDZ256rri, VALIGNQZ256rri)>; + defm : Zn4WriteResYMMPair<WriteVecALUY, [Zn4FPVAdd0123], 1, [1], 1>; // Vector integer ALU op, no logicals (YMM). def Zn4WriteVecALUYSlow : SchedWriteRes<[Zn4FPVAdd01]> { @@ -1326,9 +1342,9 @@ def : InstRW<[Zn4WriteSHA256RNDS2rr], (instrs SHA256RNDS2rr)>; // Strings instructions. // Packed Compare Implicit Length Strings, Return Mask -defm : Zn4WriteResXMMPair<WritePCmpIStrM, [Zn4FPVAdd0123], 6, [8], 3, /*LoadUOps=*/1>; +defm : Zn4WriteResXMMPair<WritePCmpIStrM, [Zn4FPVAdd0123], 7, [8], 3, /*LoadUOps=*/1>; // Packed Compare Explicit Length Strings, Return Mask -defm : Zn4WriteResXMMPair<WritePCmpEStrM, [Zn4FPVAdd0123], 6, [12], 7, /*LoadUOps=*/5>; +defm : Zn4WriteResXMMPair<WritePCmpEStrM, [Zn4FPVAdd0123], 7, [12], 7, /*LoadUOps=*/5>; // Packed Compare Implicit Length Strings, Return Index defm : Zn4WriteResXMMPair<WritePCmpIStrI, [Zn4FPVAdd0123], 2, [8], 4>; // Packed Compare Explicit Length Strings, Return Index @@ -1340,7 +1356,7 @@ defm : Zn4WriteResXMMPair<WriteAESIMC, [Zn4FPAES01], 4, [1], 1>; // InvMixColumn defm : Zn4WriteResXMMPair<WriteAESKeyGen, [Zn4FPAES01], 4, [1], 1>; // Key Generation. // Carry-less multiplication instructions. 
-defm : Zn4WriteResXMMPair<WriteCLMul, [Zn4FPCLM01], 4, [4], 4>; +defm : Zn4WriteResXMMPair<WriteCLMul, [Zn4FPCLM01], 4, [3], 4>; // EMMS/FEMMS defm : Zn4WriteResInt<WriteEMMS, [Zn4ALU0123], 2, [1], 1>; // FIXME: latency not from llvm-exegesis @@ -1386,44 +1402,44 @@ def Zn4WriteVPERM2F128rm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPVShuf]> { def : InstRW<[Zn4WriteVPERM2F128rm], (instrs VPERM2F128rmi)>; def Zn4WriteVPERMPSYrr : SchedWriteRes<[Zn4FPVShuf]> { - let Latency = 7; + let Latency = 4; let ReleaseAtCycles = [1]; - let NumMicroOps = 2; + let NumMicroOps = 1; } def : InstRW<[Zn4WriteVPERMPSYrr], (instrs VPERMPSYrr)>; def Zn4WriteVPERMPSYrm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPVShuf]> { let Latency = !add(Znver4Model.VecLoadLatency, Zn4WriteVPERMPSYrr.Latency); - let ReleaseAtCycles = [1, 1, 2]; - let NumMicroOps = !add(Zn4WriteVPERMPSYrr.NumMicroOps, 1); + let ReleaseAtCycles = [1, 1, 1]; + let NumMicroOps = 1; } def : InstRW<[Zn4WriteVPERMPSYrm], (instrs VPERMPSYrm)>; def Zn4WriteVPERMYri : SchedWriteRes<[Zn4FPVShuf]> { - let Latency = 6; + let Latency = 4; let ReleaseAtCycles = [1]; - let NumMicroOps = 2; + let NumMicroOps = 1; } def : InstRW<[Zn4WriteVPERMYri], (instrs VPERMPDYri, VPERMQYri)>; def Zn4WriteVPERMPDYmi : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPVShuf]> { let Latency = !add(Znver4Model.VecLoadLatency, Zn4WriteVPERMYri.Latency); - let ReleaseAtCycles = [1, 1, 2]; - let NumMicroOps = !add(Zn4WriteVPERMYri.NumMicroOps, 1); + let ReleaseAtCycles = [1, 1, 1]; + let NumMicroOps = 1; } def : InstRW<[Zn4WriteVPERMPDYmi], (instrs VPERMPDYmi)>; def Zn4WriteVPERMDYrr : SchedWriteRes<[Zn4FPVShuf]> { - let Latency = 5; + let Latency = 4; let ReleaseAtCycles = [1]; - let NumMicroOps = 2; + let NumMicroOps = 1; } def : InstRW<[Zn4WriteVPERMDYrr], (instrs VPERMDYrr)>; def Zn4WriteVPERMYm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPVShuf]> { let Latency = !add(Znver4Model.VecLoadLatency, Zn4WriteVPERMDYrr.Latency); - let ReleaseAtCycles = [1, 1, 2]; - let NumMicroOps = !add(Zn4WriteVPERMDYrr.NumMicroOps, 0); + let ReleaseAtCycles = [1, 1, 1]; + let NumMicroOps = 1; } def : InstRW<[Zn4WriteVPERMYm], (instrs VPERMQYmi, VPERMDYrm)>; diff --git a/llvm/lib/TargetParser/RISCVISAInfo.cpp b/llvm/lib/TargetParser/RISCVISAInfo.cpp index 31126cc..f08a0c0 100644 --- a/llvm/lib/TargetParser/RISCVISAInfo.cpp +++ b/llvm/lib/TargetParser/RISCVISAInfo.cpp @@ -765,6 +765,12 @@ Error RISCVISAInfo::checkDependency() { if (HasZvl && !HasVector) return getExtensionRequiresError("zvl*b", "v' or 'zve*"); + if (Exts.count("xsfvfbfexp16e") && + !(Exts.count("zvfbfmin") || Exts.count("zvfbfa"))) + return createStringError(errc::invalid_argument, + "'xsfvfbfexp16e' requires 'zvfbfmin' or " + "'zvfbfa' extension to also be specified"); + if (HasD && (HasC || Exts.count("zcd"))) for (auto Ext : ZcdOverlaps) if (Exts.count(Ext.str())) diff --git a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp index 46fb567..aa1346d 100644 --- a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp +++ b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp @@ -1271,7 +1271,7 @@ bool LowerTypeTestsModule::hasBranchTargetEnforcement() { // the module flags. 
if (const auto *BTE = mdconst::extract_or_null<ConstantInt>( M.getModuleFlag("branch-target-enforcement"))) - HasBranchTargetEnforcement = (BTE->getZExtValue() != 0); + HasBranchTargetEnforcement = !BTE->isZero(); else HasBranchTargetEnforcement = 0; } diff --git a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp index 76e588b..a0f7ec6 100644 --- a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp +++ b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp @@ -24,7 +24,8 @@ // returns 0, or a single vtable's function returns 1, replace each virtual // call with a comparison of the vptr against that vtable's address. // -// This pass is intended to be used during the regular and thin LTO pipelines: +// This pass is intended to be used during the regular/thin and non-LTO +// pipelines: // // During regular LTO, the pass determines the best optimization for each // virtual call and applies the resolutions directly to virtual calls that are @@ -48,6 +49,14 @@ // is supported. // - Import phase: (same as with hybrid case above). // +// During Speculative devirtualization mode -not restricted to LTO-: +// - The pass applies speculative devirtualization without requiring any type of +// visibility. +// - Skips other features like virtual constant propagation, uniform return +// value optimization, unique return value optimization and branch funnels as +// they need LTO. +// - This mode is enabled via 'devirtualize-speculatively' flag. +// //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO/WholeProgramDevirt.h" @@ -61,7 +70,9 @@ #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/BasicAliasAnalysis.h" #include "llvm/Analysis/BlockFrequencyInfo.h" +#include "llvm/Analysis/ModuleSummaryAnalysis.h" #include "llvm/Analysis/OptimizationRemarkEmitter.h" +#include "llvm/Analysis/ProfileSummaryInfo.h" #include "llvm/Analysis/TypeMetadataUtils.h" #include "llvm/Bitcode/BitcodeReader.h" #include "llvm/Bitcode/BitcodeWriter.h" @@ -145,6 +156,13 @@ static cl::opt<std::string> ClWriteSummary( "bitcode, otherwise YAML"), cl::Hidden); +// TODO: This option eventually should support any public visibility vtables +// with/out LTO. +static cl::opt<bool> ClDevirtualizeSpeculatively( + "devirtualize-speculatively", + cl::desc("Enable speculative devirtualization optimization"), + cl::init(false)); + static cl::opt<unsigned> ClThreshold("wholeprogramdevirt-branch-funnel-threshold", cl::Hidden, cl::init(10), @@ -892,6 +910,8 @@ void llvm::updatePublicTypeTestCalls(Module &M, CI->eraseFromParent(); } } else { + // TODO: Don't replace public type tests when speculative devirtualization + // gets enabled in LTO mode. auto *True = ConstantInt::getTrue(M.getContext()); for (Use &U : make_early_inc_range(PublicTypeTestFunc->uses())) { auto *CI = cast<CallInst>(U.getUser()); @@ -1083,10 +1103,10 @@ bool DevirtModule::tryFindVirtualCallTargets( if (!TM.Bits->GV->isConstant()) return false; - // We cannot perform whole program devirtualization analysis on a vtable - // with public LTO visibility. - if (TM.Bits->GV->getVCallVisibility() == - GlobalObject::VCallVisibilityPublic) + // Without ClDevirtualizeSpeculatively, we cannot perform whole program + // devirtualization analysis on a vtable with public LTO visibility. 
+ if (!ClDevirtualizeSpeculatively && TM.Bits->GV->getVCallVisibility() == + GlobalObject::VCallVisibilityPublic) return false; Function *Fn = nullptr; @@ -1105,6 +1125,12 @@ bool DevirtModule::tryFindVirtualCallTargets( if (Fn->getName() == "__cxa_pure_virtual") continue; + // In most cases empty functions will be overridden by the + // implementation of the derived class, so we can skip them. + if (ClDevirtualizeSpeculatively && Fn->getReturnType()->isVoidTy() && + Fn->getInstructionCount() <= 1) + continue; + // We can disregard unreachable functions as possible call targets, as // unreachable functions shouldn't be called. if (mustBeUnreachableFunction(Fn, ExportSummary)) @@ -1223,10 +1249,12 @@ void DevirtModule::applySingleImplDevirt(VTableSlotInfo &SlotInfo, CallTrap->setDebugLoc(CB.getDebugLoc()); } - // If fallback checking is enabled, add support to compare the virtual - // function pointer to the devirtualized target. In case of a mismatch, - // fall back to indirect call. - if (DevirtCheckMode == WPDCheckMode::Fallback) { + // If fallback checking or speculative devirtualization are enabled, + // add support to compare the virtual function pointer to the + // devirtualized target. In case of a mismatch, fall back to indirect + // call. + if (DevirtCheckMode == WPDCheckMode::Fallback || + ClDevirtualizeSpeculatively) { MDNode *Weights = MDBuilder(M.getContext()).createLikelyBranchWeights(); // Version the indirect call site. If the called value is equal to the // given callee, 'NewInst' will be executed, otherwise the original call @@ -2057,15 +2085,15 @@ void DevirtModule::scanTypeTestUsers( Function *TypeTestFunc, DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap) { // Find all virtual calls via a virtual table pointer %p under an assumption - // of the form llvm.assume(llvm.type.test(%p, %md)). This indicates that %p - // points to a member of the type identifier %md. Group calls by (type ID, - // offset) pair (effectively the identity of the virtual function) and store - // to CallSlots. + // of the form llvm.assume(llvm.type.test(%p, %md)) or + // llvm.assume(llvm.public.type.test(%p, %md)). + // This indicates that %p points to a member of the type identifier %md. + // Group calls by (type ID, offset) pair (effectively the identity of the + // virtual function) and store to CallSlots. for (Use &U : llvm::make_early_inc_range(TypeTestFunc->uses())) { auto *CI = dyn_cast<CallInst>(U.getUser()); if (!CI) continue; - // Search for virtual calls based on %p and add them to DevirtCalls. SmallVector<DevirtCallSite, 1> DevirtCalls; SmallVector<CallInst *, 1> Assumes; @@ -2348,6 +2376,12 @@ bool DevirtModule::run() { (ImportSummary && ImportSummary->partiallySplitLTOUnits())) return false; + Function *PublicTypeTestFunc = nullptr; + // If we are in speculative devirtualization mode, we can work on the public + // type test intrinsics. + if (ClDevirtualizeSpeculatively) + PublicTypeTestFunc = + Intrinsic::getDeclarationIfExists(&M, Intrinsic::public_type_test); Function *TypeTestFunc = Intrinsic::getDeclarationIfExists(&M, Intrinsic::type_test); Function *TypeCheckedLoadFunc = @@ -2361,8 +2395,9 @@ bool DevirtModule::run() { // module, this pass has nothing to do. But if we are exporting, we also need // to handle any users that appear only in the function summaries. 
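  // Illustrative sketch (pseudo-code, hypothetical names): when fallback
  // checking or -devirtualize-speculatively is enabled, a single-implementation
  // slot is not devirtualized unconditionally; the call site is versioned with
  // likely branch weights (see applySingleImplDevirt above) so that it behaves
  // roughly like
  //
  //   FnPtr = load from vtable slot      ; original indirect target
  //   if (FnPtr == &TheOnlyImpl)         ; cheap pointer comparison
  //     call TheOnlyImpl(args...)        ; devirtualized direct call
  //   else
  //     call FnPtr(args...)              ; mismatch: keep the indirect call
  //
  // The mismatch path is what keeps the speculative mode correct even without
  // LTO-level visibility guarantees on the vtables.
  // The early exit below returns when we are not exporting and none of the
  // (public) type test, assume, or checked-load intrinsics have remaining
  // users.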
if (!ExportSummary && - (!TypeTestFunc || TypeTestFunc->use_empty() || !AssumeFunc || - AssumeFunc->use_empty()) && + (((!PublicTypeTestFunc || PublicTypeTestFunc->use_empty()) && + (!TypeTestFunc || TypeTestFunc->use_empty())) || + !AssumeFunc || AssumeFunc->use_empty()) && (!TypeCheckedLoadFunc || TypeCheckedLoadFunc->use_empty()) && (!TypeCheckedLoadRelativeFunc || TypeCheckedLoadRelativeFunc->use_empty())) @@ -2373,6 +2408,9 @@ bool DevirtModule::run() { DenseMap<Metadata *, std::set<TypeMemberInfo>> TypeIdMap; buildTypeIdentifierMap(Bits, TypeIdMap); + if (PublicTypeTestFunc && AssumeFunc) + scanTypeTestUsers(PublicTypeTestFunc, TypeIdMap); + if (TypeTestFunc && AssumeFunc) scanTypeTestUsers(TypeTestFunc, TypeIdMap); @@ -2472,8 +2510,12 @@ bool DevirtModule::run() { .WPDRes[S.first.ByteOffset]; if (tryFindVirtualCallTargets(TargetsForSlot, TypeMemberInfos, S.first.ByteOffset, ExportSummary)) { - - if (!trySingleImplDevirt(ExportSummary, TargetsForSlot, S.second, Res)) { + bool SingleImplDevirt = + trySingleImplDevirt(ExportSummary, TargetsForSlot, S.second, Res); + // Out of speculative devirtualization mode, Try to apply virtual constant + // propagation or branch funneling. + // TODO: This should eventually be enabled for non-public type tests. + if (!SingleImplDevirt && !ClDevirtualizeSpeculatively) { DidVirtualConstProp |= tryVirtualConstProp(TargetsForSlot, S.second, Res, S.first); diff --git a/llvm/lib/Transforms/Instrumentation/AllocToken.cpp b/llvm/lib/Transforms/Instrumentation/AllocToken.cpp index 29968b8..8181e4e 100644 --- a/llvm/lib/Transforms/Instrumentation/AllocToken.cpp +++ b/llvm/lib/Transforms/Instrumentation/AllocToken.cpp @@ -36,6 +36,7 @@ #include "llvm/IR/Module.h" #include "llvm/IR/PassManager.h" #include "llvm/IR/Type.h" +#include "llvm/Support/AllocToken.h" #include "llvm/Support/Casting.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Compiler.h" @@ -54,47 +55,14 @@ #include <variant> using namespace llvm; +using TokenMode = AllocTokenMode; #define DEBUG_TYPE "alloc-token" namespace { -//===--- Constants --------------------------------------------------------===// - -enum class TokenMode : unsigned { - /// Incrementally increasing token ID. - Increment = 0, - - /// Simple mode that returns a statically-assigned random token ID. - Random = 1, - - /// Token ID based on allocated type hash. - TypeHash = 2, - - /// Token ID based on allocated type hash, where the top half ID-space is - /// reserved for types that contain pointers and the bottom half for types - /// that do not contain pointers. - TypeHashPointerSplit = 3, -}; - //===--- Command-line options ---------------------------------------------===// -cl::opt<TokenMode> ClMode( - "alloc-token-mode", cl::Hidden, cl::desc("Token assignment mode"), - cl::init(TokenMode::TypeHashPointerSplit), - cl::values( - clEnumValN(TokenMode::Increment, "increment", - "Incrementally increasing token ID"), - clEnumValN(TokenMode::Random, "random", - "Statically-assigned random token ID"), - clEnumValN(TokenMode::TypeHash, "typehash", - "Token ID based on allocated type hash"), - clEnumValN( - TokenMode::TypeHashPointerSplit, "typehashpointersplit", - "Token ID based on allocated type hash, where the top half " - "ID-space is reserved for types that contain pointers and the " - "bottom half for types that do not contain pointers. 
"))); - cl::opt<std::string> ClFuncPrefix("alloc-token-prefix", cl::desc("The allocation function prefix"), cl::Hidden, cl::init("__alloc_token_")); @@ -217,22 +185,19 @@ public: using ModeBase::ModeBase; uint64_t operator()(const CallBase &CB, OptimizationRemarkEmitter &ORE) { - const auto [N, H] = getHash(CB, ORE); - return N ? boundedToken(H) : H; - } -protected: - std::pair<MDNode *, uint64_t> getHash(const CallBase &CB, - OptimizationRemarkEmitter &ORE) { if (MDNode *N = getAllocTokenMetadata(CB)) { MDString *S = cast<MDString>(N->getOperand(0)); - return {N, getStableSipHash(S->getString())}; + AllocTokenMetadata Metadata{S->getString(), containsPointer(N)}; + if (auto Token = getAllocToken(TokenMode::TypeHash, Metadata, MaxTokens)) + return *Token; } // Fallback. remarkNoMetadata(CB, ORE); - return {nullptr, ClFallbackToken}; + return ClFallbackToken; } +protected: /// Remark that there was no precise type information. static void remarkNoMetadata(const CallBase &CB, OptimizationRemarkEmitter &ORE) { @@ -253,20 +218,18 @@ public: using TypeHashMode::TypeHashMode; uint64_t operator()(const CallBase &CB, OptimizationRemarkEmitter &ORE) { - if (MaxTokens == 1) - return 0; - const uint64_t HalfTokens = MaxTokens / 2; - const auto [N, H] = getHash(CB, ORE); - if (!N) { - // Pick the fallback token (ClFallbackToken), which by default is 0, - // meaning it'll fall into the pointer-less bucket. Override by setting - // -alloc-token-fallback if that is the wrong choice. - return H; + if (MDNode *N = getAllocTokenMetadata(CB)) { + MDString *S = cast<MDString>(N->getOperand(0)); + AllocTokenMetadata Metadata{S->getString(), containsPointer(N)}; + if (auto Token = getAllocToken(TokenMode::TypeHashPointerSplit, Metadata, + MaxTokens)) + return *Token; } - uint64_t Hash = H % HalfTokens; // base hash - if (containsPointer(N)) - Hash += HalfTokens; - return Hash; + // Pick the fallback token (ClFallbackToken), which by default is 0, meaning + // it'll fall into the pointer-less bucket. Override by setting + // -alloc-token-fallback if that is the wrong choice. 
+ remarkNoMetadata(CB, ORE); + return ClFallbackToken; } }; @@ -286,7 +249,7 @@ public: : Options(transformOptionsFromCl(std::move(Opts))), Mod(M), FAM(MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager()), Mode(IncrementMode(*IntPtrTy, *Options.MaxTokens)) { - switch (ClMode.getValue()) { + switch (Options.Mode) { case TokenMode::Increment: break; case TokenMode::Random: diff --git a/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp index d18c0d0..80e77e09 100644 --- a/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp @@ -2020,7 +2020,6 @@ static void moveFastMathFlags(Function &F, F.removeFnAttr(attr); \ FMF.set##setter(); \ } - MOVE_FLAG("unsafe-fp-math", Fast) MOVE_FLAG("no-infs-fp-math", NoInfs) MOVE_FLAG("no-nans-fp-math", NoNaNs) MOVE_FLAG("no-signed-zeros-fp-math", NoSignedZeros) diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index ff25ef5..48cf763 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -4051,7 +4051,7 @@ static bool canNarrowLoad(VPWidenRecipe *WideMember0, unsigned OpIdx, static std::optional<ElementCount> isConsecutiveInterleaveGroup( VPInterleaveRecipe *InterleaveR, ArrayRef<ElementCount> VFs, VPTypeAnalysis &TypeInfo, const TargetTransformInfo &TTI) { - if (!InterleaveR) + if (!InterleaveR || InterleaveR->getMask()) return std::nullopt; Type *GroupElementTy = nullptr; diff --git a/llvm/lib/Transforms/Vectorize/VPlanValue.h b/llvm/lib/Transforms/Vectorize/VPlanValue.h index 0678bc90..83e3fca 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanValue.h +++ b/llvm/lib/Transforms/Vectorize/VPlanValue.h @@ -41,10 +41,10 @@ class VPRecipeBase; class VPInterleaveBase; class VPPhiAccessors; -// This is the base class of the VPlan Def/Use graph, used for modeling the data -// flow into, within and out of the VPlan. VPValues can stand for live-ins -// coming from the input IR and instructions which VPlan will generate if -// executed. +/// This is the base class of the VPlan Def/Use graph, used for modeling the +/// data flow into, within and out of the VPlan. VPValues can stand for live-ins +/// coming from the input IR and instructions which VPlan will generate if +/// executed. class LLVM_ABI_FOR_TEST VPValue { friend class VPDef; friend struct VPDoubleValueDef; @@ -57,7 +57,7 @@ class LLVM_ABI_FOR_TEST VPValue { SmallVector<VPUser *, 1> Users; protected: - // Hold the underlying Value, if any, attached to this VPValue. + /// Hold the underlying Value, if any, attached to this VPValue. Value *UnderlyingVal; /// Pointer to the VPDef that defines this VPValue. If it is nullptr, the diff --git a/llvm/test/Analysis/BasicAA/matrix-intrinsics.ll b/llvm/test/Analysis/BasicAA/matrix-intrinsics.ll new file mode 100644 index 0000000..1de8ab5 --- /dev/null +++ b/llvm/test/Analysis/BasicAA/matrix-intrinsics.ll @@ -0,0 +1,30 @@ +; RUN: opt %s -aa-pipeline=basic-aa -passes=aa-eval -print-all-alias-modref-info -disable-output 2>&1 | FileCheck %s + +; BasicAA should prove that loads from sufficiently large static offsets +; don't overlap with matrix loads with a statically known size. 
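; Offset arithmetic for the two tests below (the stride argument is given in
; elements, with rows=4 and cols=2, so each intrinsic touches two 4-double
; columns that start 8 doubles apart, i.e. elements [0,4) and [8,12) relative
; to its base pointer):
;  - the store is based at %src and covers doubles [0,4) and [8,12) of %src;
;  - in the first test the load is based at %src + 12 doubles and covers
;    [12,16) and [20,24), which is disjoint from the store;
;  - in the second test the load is based at %src + 11 doubles, so its first
;    column [11,15) overlaps the store's second column [8,12).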
+ +define <8 x double> @non_overlapping_strided_load(ptr %src) { +; CHECK-LABEL: Function: non_overlapping_strided_load: +; Just Ref: %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 8, i1 false, i32 4, i32 2) <-> call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l, ptr %src, i32 8, i1 false, i32 4, i32 2) +; Just Mod: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l, ptr %src, i32 8, i1 false, i32 4, i32 2) <-> %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 8, i1 false, i32 4, i32 2) +entry: + %src.offset = getelementptr inbounds double, ptr %src, i32 12 + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 8, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store(<8 x double> %l, ptr %src, i32 8, i1 false, i32 4, i32 2) + ret <8 x double> %l +} + +define <8 x double> @overlapping_strided_load(ptr %src) { +; CHECK-LABEL: Function: overlapping_strided_load: +; CHECK: Just Ref: %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 8, i1 false, i32 4, i32 2) <-> call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l, ptr %src, i32 8, i1 false, i32 4, i32 2) +; CHECK: Just Mod: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l, ptr %src, i32 8, i1 false, i32 4, i32 2) <-> %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 8, i1 false, i32 4, i32 2) +; +entry: + %src.offset = getelementptr inbounds double, ptr %src, i32 11 + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 8, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store(<8 x double> %l, ptr %src, i32 8, i1 false, i32 4, i32 2) + ret <8 x double> %l +} + +declare <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr, i32, i1, i32, i32) +declare void @llvm.matrix.column.major.store.v8f64.i32(<8 x double>, ptr, i32, i1, i32, i32) diff --git a/llvm/test/Assembler/autoupgrade-invalid-masked-align.ll b/llvm/test/Assembler/autoupgrade-invalid-masked-align.ll new file mode 100644 index 0000000..458bd2e --- /dev/null +++ b/llvm/test/Assembler/autoupgrade-invalid-masked-align.ll @@ -0,0 +1,49 @@ +; RUN: split-file %s %t +; RUN: not llvm-as < %t/masked-store.ll 2>&1 | FileCheck %s --check-prefix=MASKED-STORE +; RUN: not llvm-as < %t/masked-store-zero.ll 2>&1 | FileCheck %s --check-prefix=MASKED-STORE-ZERO +; RUN: not llvm-as < %t/masked-load.ll 2>&1 | FileCheck %s --check-prefix=MASKED-LOAD +; RUN: not llvm-as < %t/masked-load-zero.ll 2>&1 | FileCheck %s --check-prefix=MASKED-LOAD-ZERO +; RUN: not llvm-as < %t/masked-scatter.ll 2>&1 | FileCheck %s --check-prefix=MASKED-SCATTER +; RUN: not llvm-as < %t/masked-gather.ll 2>&1 | FileCheck %s --check-prefix=MASKED-GATHER + +;--- masked-store.ll +; MASKED-STORE: LLVM ERROR: Invalid alignment argument +define void @masked_store(ptr %ptr, <2 x i1> %mask, <2 x double> %val) { + call void @llvm.masked.store.v2f64.p0(<2 x double> %val, ptr %ptr, i32 3, <2 x i1> %mask) + ret void +} + +;--- masked-store-zero.ll +; MASKED-STORE-ZERO: LLVM ERROR: Invalid zero alignment argument +define void @masked_store_zero(ptr %ptr, <2 x i1> %mask, <2 x double> %val) { + call void @llvm.masked.store.v2f64.p0(<2 x double> %val, ptr %ptr, i32 0, <2 x i1> %mask) + ret void +} + +;--- masked-load.ll +; MASKED-LOAD: LLVM ERROR: Invalid alignment argument +define void @masked_load(ptr %ptr, <2 x i1> %mask, <2 x double> %val) { + 
call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 3, <2 x i1> %mask, <2 x double> %val) + ret void +} + +;--- masked-load-zero.ll +; MASKED-LOAD-ZERO: LLVM ERROR: Invalid zero alignment argument +define void @masked_load_zero(ptr %ptr, <2 x i1> %mask, <2 x double> %val) { + call <2 x double> @llvm.masked.load.v2f64.p0(ptr %ptr, i32 0, <2 x i1> %mask, <2 x double> %val) + ret void +} + +;--- masked-scatter.ll +; MASKED-SCATTER: LLVM ERROR: Invalid alignment argument +define void @masked_scatter(<2 x ptr> %ptr, <2 x i1> %mask, <2 x double> %val) { + call void @llvm.masked.scatter.v2f64.p0(<2 x double> %val, <2 x ptr> %ptr, i32 3, <2 x i1> %mask) + ret void +} + +;--- masked-gather.ll +; MASKED-GATHER: LLVM ERROR: Invalid alignment argument +define void @masked_gather(<2 x ptr> %ptr, <2 x i1> %mask, <2 x double> %val) { + call <2 x double> @llvm.masked.gather.v2f64.p0(<2 x ptr> %ptr, i32 3, <2 x i1> %mask, <2 x double> %val) + ret void +} diff --git a/llvm/test/Bitcode/upgrade-branch-protection.ll b/llvm/test/Bitcode/upgrade-branch-protection.ll index 1b33e39..6f60ba5 100644 --- a/llvm/test/Bitcode/upgrade-branch-protection.ll +++ b/llvm/test/Bitcode/upgrade-branch-protection.ll @@ -1,8 +1,11 @@ -;; Test that module flags "branch-target-enforcement" and "sign-return-address" can be upgraded to -;; are upgraded from Error to Min. +;; Test that module flags "branch-target-enforcement" and "sign-return-address" +;; can be upgraded to are upgraded from Error to Min and the value is changed 2 +;; as the module is converted to the semantic. ; RUN: llvm-as %s -o - | llvm-dis - | FileCheck %s +target triple = "aarch64-unknown-linux-gnu" + !llvm.module.flags = !{!0, !1, !2, !3} !0 = !{i32 1, !"branch-target-enforcement", i32 1} @@ -10,7 +13,7 @@ !2 = !{i32 1, !"sign-return-address-all", i32 1} !3 = !{i32 1, !"sign-return-address-with-bkey", i32 1} -;CHECK: !0 = !{i32 8, !"branch-target-enforcement", i32 1} -;CHECK: !1 = !{i32 8, !"sign-return-address", i32 1} -;CHECK: !2 = !{i32 8, !"sign-return-address-all", i32 1} -;CHECK: !3 = !{i32 8, !"sign-return-address-with-bkey", i32 1}
\ No newline at end of file +;CHECK: !0 = !{i32 8, !"branch-target-enforcement", i32 2} +;CHECK: !1 = !{i32 8, !"sign-return-address", i32 2} +;CHECK: !2 = !{i32 8, !"sign-return-address-all", i32 2} +;CHECK: !3 = !{i32 8, !"sign-return-address-with-bkey", i32 2} diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir index 97a0417..b040ff2 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir @@ -56,7 +56,7 @@ } - attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2" "unsafe-fp-math"="false" "use-soft-float"="false" } + attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2" "use-soft-float"="false" } attributes #1 = { argmemonly nounwind } attributes #2 = { optsize } attributes #3 = { minsize } diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir index fc4fbac..f24aeae 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir @@ -47,7 +47,7 @@ ret void } - attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2" "unsafe-fp-math"="false" "use-soft-float"="false" } + attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2" "use-soft-float"="false" } attributes #1 = { argmemonly nounwind } ... 
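The LowerTypeTests.cpp hunk above and the upgrade-branch-protection.ll test both
revolve around the "branch-target-enforcement" module flag. As a self-contained
sketch of that query pattern (the helper name is made up; the API calls are the
ones used in the hunk itself):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Returns true when the module was built with branch target enforcement,
// treating any non-zero flag value as "enabled", i.e. the same
// mdconst::extract_or_null + isZero() pattern LowerTypeTests now uses.
static bool hasBranchTargetEnforcement(const Module &M) {
  if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("branch-target-enforcement")))
    return !BTE->isZero();
  return false; // flag absent: assume BTI is not enforced
}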
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir index b06cadf..e4d2ca3 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir @@ -50,7 +50,7 @@ declare void @llvm.stackprotector(ptr, ptr) #2 - attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2" "unsafe-fp-math"="false" "use-soft-float"="false" } + attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2" "use-soft-float"="false" } attributes #1 = { argmemonly nounwind } ... diff --git a/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll b/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll index 0c1776e..6e3682a 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll @@ -37,7 +37,7 @@ for.body: ; preds = %for.body, %entry ; Function Attrs: nounwind readnone declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1 -attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" } attributes #1 = { nounwind readnone } !llvm.dbg.cu = !{!0} diff --git a/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll b/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll index f2ed57e..353e818 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll @@ -325,7 +325,7 @@ entry: declare void @hhh(double, double) -attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="true" "use-soft-float"="false" } -attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="true" "use-soft-float"="false" } +attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "use-soft-float"="false" } +attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "use-soft-float"="false" } attributes #2 = { nounwind } diff --git 
a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll index 7e97116..8da0e11 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll @@ -694,8 +694,8 @@ bb1: ; CHECK: .[[LABEL]]: ; CHECK: ret -attributes #0 = { "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" } +attributes #1 = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" } !1 = !{!2, !2, i64 0} !2 = !{!"int", !3, i64 0} diff --git a/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll b/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll index 296435a..937bfe4 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll @@ -519,8 +519,8 @@ while.cond: br label %while.cond } -attributes #0 = { nounwind readonly "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { nounwind readonly "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" } +attributes #1 = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" } attributes #2 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/aarch64-mov-debug-locs.mir b/llvm/test/CodeGen/AArch64/aarch64-mov-debug-locs.mir index 45fa2be5..c05d661 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-mov-debug-locs.mir +++ b/llvm/test/CodeGen/AArch64/aarch64-mov-debug-locs.mir @@ -79,8 +79,8 @@ ; Function Attrs: nounwind declare void @llvm.stackprotector(ptr, ptr) #3 - attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } - attributes #1 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } + attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" 
"less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" } + attributes #1 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" } attributes #2 = { nounwind readnone speculatable } attributes #3 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/arm64-detect-vec-redux.ll b/llvm/test/CodeGen/AArch64/arm64-detect-vec-redux.ll index 4e86f52..071344d 100644 --- a/llvm/test/CodeGen/AArch64/arm64-detect-vec-redux.ll +++ b/llvm/test/CodeGen/AArch64/arm64-detect-vec-redux.ll @@ -47,6 +47,6 @@ declare <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64>, <2 x i64>) #1 ; Function Attrs: nounwind readnone declare <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32>, <2 x i32>) #1 -attributes #0 = { nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" } attributes #1 = { nounwind readnone } attributes #2 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/arm64-fma-combine-with-fpfusion.ll b/llvm/test/CodeGen/AArch64/arm64-fma-combine-with-fpfusion.ll index 9b3d539..0ddcdcc 100644 --- a/llvm/test/CodeGen/AArch64/arm64-fma-combine-with-fpfusion.ll +++ b/llvm/test/CodeGen/AArch64/arm64-fma-combine-with-fpfusion.ll @@ -8,5 +8,5 @@ define float @mul_add(float %a, float %b, float %c) local_unnamed_addr #0 { ret float %add } -attributes #0 = { norecurse nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { norecurse nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" } diff --git a/llvm/test/CodeGen/AArch64/arm64-fma-combines.ll b/llvm/test/CodeGen/AArch64/arm64-fma-combines.ll index e17a0a9..54f752e 100644 --- a/llvm/test/CodeGen/AArch64/arm64-fma-combines.ll +++ b/llvm/test/CodeGen/AArch64/arm64-fma-combines.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -O=3 -mtriple=arm64-apple-ios -mcpu=cyclone -mattr=+fullfp16 -enable-unsafe-fp-math 
-verify-machineinstrs | FileCheck %s +; RUN: llc < %s -O=3 -mtriple=arm64-apple-ios -mcpu=cyclone -mattr=+fullfp16 -verify-machineinstrs | FileCheck %s define void @foo_2d(ptr %src) { ; CHECK-LABEL: %entry diff --git a/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll b/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll index d2ce7e6..41f57bf 100644 --- a/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll +++ b/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll @@ -84,7 +84,7 @@ bb3: ; preds = %bb3, %bb ; Function Attrs: nounwind readnone declare i64 @llvm.objectsize.i64.p0(ptr, i1) #1 -attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" } attributes #1 = { nounwind readnone } !1 = !{!2, !2, i64 0} diff --git a/llvm/test/CodeGen/AArch64/arm64-ld1.ll b/llvm/test/CodeGen/AArch64/arm64-ld1.ll index 0b22fa4..c2b2c1e 100644 --- a/llvm/test/CodeGen/AArch64/arm64-ld1.ll +++ b/llvm/test/CodeGen/AArch64/arm64-ld1.ll @@ -1654,24 +1654,14 @@ define %struct.__neon_float64x2x4_t @ld1_x4_v2f64(ptr %addr) { } define <8 x i8> @dup_ld1_from_stack(ptr %__ret) { -; CHECK-SD-LABEL: dup_ld1_from_stack: -; CHECK-SD: // %bb.0: // %entry -; CHECK-SD-NEXT: sub sp, sp, #16 -; CHECK-SD-NEXT: .cfi_def_cfa_offset 16 -; CHECK-SD-NEXT: add x8, sp, #15 -; CHECK-SD-NEXT: ld1r.8b { v0 }, [x8] -; CHECK-SD-NEXT: add sp, sp, #16 -; CHECK-SD-NEXT: ret -; -; CHECK-GI-LABEL: dup_ld1_from_stack: -; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 16 -; CHECK-GI-NEXT: .cfi_offset w29, -16 -; CHECK-GI-NEXT: add x8, sp, #15 -; CHECK-GI-NEXT: ld1r.8b { v0 }, [x8] -; CHECK-GI-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload -; CHECK-GI-NEXT: ret +; CHECK-LABEL: dup_ld1_from_stack: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: add x8, sp, #15 +; CHECK-NEXT: ld1r.8b { v0 }, [x8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret entry: %item = alloca i8, align 1 %0 = load i8, ptr %item, align 1 diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll index 4cdc6cc..c6cf240 100644 --- a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll +++ b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll @@ -107,7 +107,7 @@ define <4 x float> @neon4xfloat(<4 x float> %A, <4 x float> %B) { ; Function Attrs: nounwind declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #1 -attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" } attributes #1 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll index 82b34ef..bb1a6b0 100644 --- a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll +++ b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll @@ -108,5 +108,5 @@ for.end: ; preds = %for.cond ; Function Attrs: nounwind declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #1 -attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" } attributes #1 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/arm64-rounding.ll b/llvm/test/CodeGen/AArch64/arm64-rounding.ll index d487aab..3ce35bf 100644 --- a/llvm/test/CodeGen/AArch64/arm64-rounding.ll +++ b/llvm/test/CodeGen/AArch64/arm64-rounding.ll @@ -201,4 +201,4 @@ entry: } attributes #0 = { nounwind } -attributes #1 = { nounwind "unsafe-fp-math"="true" } +attributes #1 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/arm64-storebytesmerge.ll b/llvm/test/CodeGen/AArch64/arm64-storebytesmerge.ll index db65fdd..1486b3a 100644 --- a/llvm/test/CodeGen/AArch64/arm64-storebytesmerge.ll +++ b/llvm/test/CodeGen/AArch64/arm64-storebytesmerge.ll @@ -36,6 +36,6 @@ for.end705.i: ; preds = %for.body453.i declare void @f() local_unnamed_addr #1 -attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a57" "target-features"="+crc,+crypto,+fp-armv8,+neon" "unsafe-fp-math"="true" "use-soft-float"="false" } -attributes #1 
= { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a57" "target-features"="+crc,+crypto,+fp-armv8,+neon" "unsafe-fp-math"="true" "use-soft-float"="false" } +attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a57" "target-features"="+crc,+crypto,+fp-armv8,+neon" "use-soft-float"="false" } +attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a57" "target-features"="+crc,+crypto,+fp-armv8,+neon" "use-soft-float"="false" } attributes #2 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll b/llvm/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll index fc59350..593d629 100644 --- a/llvm/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll +++ b/llvm/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll @@ -18,7 +18,7 @@ entry: ret i32 %1 } -attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="true" "use-soft-float"="false" } +attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "use-soft-float"="false" } !llvm.ident = !{!0} diff --git a/llvm/test/CodeGen/AArch64/bti-branch-relaxation.ll b/llvm/test/CodeGen/AArch64/bti-branch-relaxation.ll index 2e3b99f..c4bf7d2 100644 --- a/llvm/test/CodeGen/AArch64/bti-branch-relaxation.ll +++ b/llvm/test/CodeGen/AArch64/bti-branch-relaxation.ll @@ -61,4 +61,4 @@ declare dso_local void @e(...) 
local_unnamed_addr #0 declare dso_local i64 @llvm.aarch64.space(i32, i64) local_unnamed_addr #0 -attributes #0 = { nounwind "branch-target-enforcement" "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon,+v8.5a" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { nounwind "branch-target-enforcement" "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon,+v8.5a" "use-soft-float"="false" } diff --git a/llvm/test/CodeGen/AArch64/consthoist-gep.ll b/llvm/test/CodeGen/AArch64/consthoist-gep.ll index 031ee35..7d2aaec 100644 --- a/llvm/test/CodeGen/AArch64/consthoist-gep.ll +++ b/llvm/test/CodeGen/AArch64/consthoist-gep.ll @@ -108,7 +108,7 @@ bb19: ; preds = %bb3, %bb ret void } -attributes #0 = { norecurse nounwind optsize ssp "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { norecurse nounwind optsize ssp "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" } !llvm.module.flags = !{!0, !1} !llvm.ident = !{!2} diff --git a/llvm/test/CodeGen/AArch64/csel-zero-float.ll b/llvm/test/CodeGen/AArch64/csel-zero-float.ll index 6edde13..56a33cc 100644 --- a/llvm/test/CodeGen/AArch64/csel-zero-float.ll +++ b/llvm/test/CodeGen/AArch64/csel-zero-float.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=aarch64-none-linux-gnu -enable-unsafe-fp-math < %s +; RUN: llc -mtriple=aarch64-none-linux-gnu < %s ; There is no invocation to FileCheck as this ; caused a crash in "Post-RA pseudo instruction expansion" diff --git a/llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll b/llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll index 61df396..e561481 100644 --- a/llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll +++ b/llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll @@ -32,5 +32,5 @@ main_: declare i32 @printf(ptr, ...) 
#1 -attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } -attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" } +attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" } diff --git a/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll b/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll index 1a83930..9193025 100644 --- a/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll +++ b/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll @@ -2,8 +2,8 @@ ; RUN: llc -mtriple=aarch64-linux-gnu -o - %s | FileCheck %s ; load zero-extended i32, bitcast to f64 -define double @_Z9load_u64_from_u32_testPj(ptr %n){ -; CHECK-LABEL: _Z9load_u64_from_u32_testPj: +define double @load_u64_from_u32(ptr %n){ +; CHECK-LABEL: load_u64_from_u32: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr s0, [x0] ; CHECK-NEXT: ret @@ -15,8 +15,8 @@ entry: } ; load zero-extended i16, bitcast to f64 -define double @_Z9load_u64_from_u16_testPj(ptr %n){ -; CHECK-LABEL: _Z9load_u64_from_u16_testPj: +define double @load_u64_from_u16(ptr %n){ +; CHECK-LABEL: load_u64_from_u16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr h0, [x0] ; CHECK-NEXT: ret @@ -28,8 +28,8 @@ entry: } ; load zero-extended i8, bitcast to f64 -define double @_Z16load_u64_from_u8Ph(ptr %n){ -; CHECK-LABEL: _Z16load_u64_from_u8Ph: +define double @load_u64_from_u8(ptr %n){ +; CHECK-LABEL: load_u64_from_u8: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr b0, [x0] ; CHECK-NEXT: ret @@ -41,8 +41,8 @@ entry: } ; load zero-extended i16, bitcast to f32 -define float @_Z17load_u32_from_u16Pt(ptr %n){ -; CHECK-LABEL: _Z17load_u32_from_u16Pt: +define float @load_u32_from_u16(ptr %n){ +; CHECK-LABEL: load_u32_from_u16: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr h0, [x0] ; CHECK-NEXT: ret @@ -54,8 +54,8 @@ entry: } ; load zero-extended i8, bitcast to f32 -define float @_Z16load_u32_from_u8Ph(ptr %n){ -; CHECK-LABEL: _Z16load_u32_from_u8Ph: +define float @load_u32_from_u8(ptr %n){ +; CHECK-LABEL: load_u32_from_u8: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr b0, [x0] ; CHECK-NEXT: ret @@ -67,8 +67,8 @@ entry: } ; load zero-extended i8, bitcast to f16 -define half @_Z16load_u16_from_u8Ph(ptr %n){ -; CHECK-LABEL: _Z16load_u16_from_u8Ph: +define half @load_u16_from_u8(ptr %n){ +; CHECK-LABEL: load_u16_from_u8: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldr b0, [x0] ; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0 @@ -80,3 +80,504 @@ entry: ret half %1 } + +define double @load_u64_from_u32_off1(ptr %n){ +; CHECK-LABEL: load_u64_from_u32_off1: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldur w8, [x0, #1] +; CHECK-NEXT: fmov d0, x8 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 1 + %0 = load i32, ptr %p, align 4 + %conv = zext i32 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define double @load_u64_from_u16_off1(ptr %n){ +; CHECK-LABEL: load_u64_from_u16_off1: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldurh w8, [x0, #1] +; 
CHECK-NEXT: fmov d0, x8 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 1 + %0 = load i16, ptr %p, align 2 + %conv = zext i16 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define double @load_u64_from_u8_off1(ptr %n){ +; CHECK-LABEL: load_u64_from_u8_off1: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldrb w8, [x0, #1] +; CHECK-NEXT: fmov d0, x8 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 1 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define float @load_u32_from_u16_off1(ptr %n){ +; CHECK-LABEL: load_u32_from_u16_off1: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldurh w8, [x0, #1] +; CHECK-NEXT: fmov s0, w8 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 1 + %0 = load i16, ptr %p, align 2 + %conv = zext i16 %0 to i32 + %1 = bitcast i32 %conv to float + ret float %1 +} + +define float @load_u32_from_u8_off1(ptr %n){ +; CHECK-LABEL: load_u32_from_u8_off1: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldrb w8, [x0, #1] +; CHECK-NEXT: fmov s0, w8 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 1 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i32 + %1 = bitcast i32 %conv to float + ret float %1 +} + +define half @load_u16_from_u8_off1(ptr %n){ +; CHECK-LABEL: load_u16_from_u8_off1: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldrb w8, [x0, #1] +; CHECK-NEXT: fmov s0, w8 +; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 1 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i16 + %1 = bitcast i16 %conv to half + ret half %1 +} + + + +define double @load_u64_from_u32_off2(ptr %n){ +; CHECK-LABEL: load_u64_from_u32_off2: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldur w8, [x0, #2] +; CHECK-NEXT: fmov d0, x8 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 2 + %0 = load i32, ptr %p, align 4 + %conv = zext i32 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define double @load_u64_from_u16_off2(ptr %n){ +; CHECK-LABEL: load_u64_from_u16_off2: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldrh w8, [x0, #2] +; CHECK-NEXT: fmov d0, x8 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 2 + %0 = load i16, ptr %p, align 2 + %conv = zext i16 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define double @load_u64_from_u8_off2(ptr %n){ +; CHECK-LABEL: load_u64_from_u8_off2: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldrb w8, [x0, #2] +; CHECK-NEXT: fmov d0, x8 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 2 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define float @load_u32_from_u16_off2(ptr %n){ +; CHECK-LABEL: load_u32_from_u16_off2: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr h0, [x0, #2] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 2 + %0 = load i16, ptr %p, align 2 + %conv = zext i16 %0 to i32 + %1 = bitcast i32 %conv to float + ret float %1 +} + +define float @load_u32_from_u8_off2(ptr %n){ +; CHECK-LABEL: load_u32_from_u8_off2: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr b0, [x0, #1] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 2 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i32 + %1 = bitcast i32 %conv to float + ret float %1 +} + +define half @load_u16_from_u8_off2(ptr %n){ +; CHECK-LABEL: load_u16_from_u8_off2: +; CHECK: // %bb.0: // %entry +; 
CHECK-NEXT: ldr b0, [x0, #1] +; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 2 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i16 + %1 = bitcast i16 %conv to half + ret half %1 +} + + + +define double @load_u64_from_u32_off255(ptr %n){ +; CHECK-LABEL: load_u64_from_u32_off255: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldur w8, [x0, #255] +; CHECK-NEXT: fmov d0, x8 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 255 + %0 = load i32, ptr %p, align 4 + %conv = zext i32 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define double @load_u64_from_u16_off255(ptr %n){ +; CHECK-LABEL: load_u64_from_u16_off255: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldurh w8, [x0, #255] +; CHECK-NEXT: fmov d0, x8 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 255 + %0 = load i16, ptr %p, align 2 + %conv = zext i16 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define double @load_u64_from_u8_off255(ptr %n){ +; CHECK-LABEL: load_u64_from_u8_off255: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldrb w8, [x0, #255] +; CHECK-NEXT: fmov d0, x8 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 255 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define float @load_u32_from_u16_off255(ptr %n){ +; CHECK-LABEL: load_u32_from_u16_off255: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldurh w8, [x0, #255] +; CHECK-NEXT: fmov s0, w8 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 255 + %0 = load i16, ptr %p, align 2 + %conv = zext i16 %0 to i32 + %1 = bitcast i32 %conv to float + ret float %1 +} + +define float @load_u32_from_u8_off255(ptr %n){ +; CHECK-LABEL: load_u32_from_u8_off255: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldrb w8, [x0, #255] +; CHECK-NEXT: fmov s0, w8 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 255 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i32 + %1 = bitcast i32 %conv to float + ret float %1 +} + +define half @load_u16_from_u8_off255(ptr %n){ +; CHECK-LABEL: load_u16_from_u8_off255: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldrb w8, [x0, #255] +; CHECK-NEXT: fmov s0, w8 +; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 255 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i16 + %1 = bitcast i16 %conv to half + ret half %1 +} + + +define double @load_u64_from_u32_off256(ptr %n){ +; CHECK-LABEL: load_u64_from_u32_off256: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr s0, [x0, #256] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 256 + %0 = load i32, ptr %p, align 4 + %conv = zext i32 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define double @load_u64_from_u16_off256(ptr %n){ +; CHECK-LABEL: load_u64_from_u16_off256: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr h0, [x0, #128] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 256 + %0 = load i16, ptr %p, align 2 + %conv = zext i16 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define double @load_u64_from_u8_off256(ptr %n){ +; CHECK-LABEL: load_u64_from_u8_off256: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr b0, [x0, #64] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 256 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define float 
@load_u32_from_u16_off256(ptr %n){ +; CHECK-LABEL: load_u32_from_u16_off256: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr h0, [x0, #256] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 256 + %0 = load i16, ptr %p, align 2 + %conv = zext i16 %0 to i32 + %1 = bitcast i32 %conv to float + ret float %1 +} + +define float @load_u32_from_u8_off256(ptr %n){ +; CHECK-LABEL: load_u32_from_u8_off256: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr b0, [x0, #128] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 256 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i32 + %1 = bitcast i32 %conv to float + ret float %1 +} + +define half @load_u16_from_u8_off256(ptr %n){ +; CHECK-LABEL: load_u16_from_u8_off256: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr b0, [x0, #128] +; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 256 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i16 + %1 = bitcast i16 %conv to half + ret half %1 +} + + + +define double @load_u64_from_u32_offn(ptr %n){ +; CHECK-LABEL: load_u64_from_u32_offn: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr s0, [x0, #16380] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 16380 + %0 = load i32, ptr %p, align 4 + %conv = zext i32 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define double @load_u64_from_u16_offn(ptr %n){ +; CHECK-LABEL: load_u64_from_u16_offn: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov w8, #8190 // =0x1ffe +; CHECK-NEXT: ldr h0, [x0, x8] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 8190 + %0 = load i16, ptr %p, align 2 + %conv = zext i16 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define double @load_u64_from_u8_offn(ptr %n){ +; CHECK-LABEL: load_u64_from_u8_offn: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr b0, [x0, #4095] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 4095 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define float @load_u32_from_u16_offn(ptr %n){ +; CHECK-LABEL: load_u32_from_u16_offn: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr h0, [x0, #8190] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 8190 + %0 = load i16, ptr %p, align 2 + %conv = zext i16 %0 to i32 + %1 = bitcast i32 %conv to float + ret float %1 +} + +define float @load_u32_from_u8_offn(ptr %n){ +; CHECK-LABEL: load_u32_from_u8_offn: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr b0, [x0, #4095] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 4095 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i32 + %1 = bitcast i32 %conv to float + ret float %1 +} + +define half @load_u16_from_u8_offn(ptr %n){ +; CHECK-LABEL: load_u16_from_u8_offn: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr b0, [x0, #4095] +; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 4095 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i16 + %1 = bitcast i16 %conv to half + ret half %1 +} + + +define double @load_u64_from_u32_offnp1(ptr %n){ +; CHECK-LABEL: load_u64_from_u32_offnp1: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: add x8, x0, #4, lsl #12 // =16384 +; CHECK-NEXT: ldr s0, [x8] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 16384 + %0 = load i32, ptr %p, align 4 + %conv = zext i32 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + 
+define double @load_u64_from_u16_offnp1(ptr %n){ +; CHECK-LABEL: load_u64_from_u16_offnp1: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr h0, [x0, #4096] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 8192 + %0 = load i16, ptr %p, align 2 + %conv = zext i16 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define double @load_u64_from_u8_offnp1(ptr %n){ +; CHECK-LABEL: load_u64_from_u8_offnp1: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr b0, [x0, #1024] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 4096 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i64 + %1 = bitcast i64 %conv to double + ret double %1 +} + +define float @load_u32_from_u16_offnp1(ptr %n){ +; CHECK-LABEL: load_u32_from_u16_offnp1: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: add x8, x0, #2, lsl #12 // =8192 +; CHECK-NEXT: ldr h0, [x8] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 8192 + %0 = load i16, ptr %p, align 2 + %conv = zext i16 %0 to i32 + %1 = bitcast i32 %conv to float + ret float %1 +} + +define float @load_u32_from_u8_offnp1(ptr %n){ +; CHECK-LABEL: load_u32_from_u8_offnp1: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr b0, [x0, #2048] +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 4096 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i32 + %1 = bitcast i32 %conv to float + ret float %1 +} + +define half @load_u16_from_u8_offnp1(ptr %n){ +; CHECK-LABEL: load_u16_from_u8_offnp1: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr b0, [x0, #2048] +; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0 +; CHECK-NEXT: ret +entry: + %p = getelementptr i8, ptr %n, i64 4096 + %0 = load i8, ptr %p, align 1 + %conv = zext i8 %0 to i16 + %1 = bitcast i16 %conv to half + ret half %1 +} + diff --git a/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll b/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll index c2ef2fa..00a8c30 100644 --- a/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll +++ b/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll @@ -74,7 +74,7 @@ for.body: ; preds = %for.body.preheader, br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !10 } -attributes #0 = { nofree norecurse nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="non-leaf" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { nofree norecurse nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="non-leaf" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" } !llvm.module.flags = !{!0} !llvm.ident = !{!1} diff --git a/llvm/test/CodeGen/AArch64/recp-fastmath.ll b/llvm/test/CodeGen/AArch64/recp-fastmath.ll index 9f00621..fa1da33 100644 --- a/llvm/test/CodeGen/AArch64/recp-fastmath.ll +++ b/llvm/test/CodeGen/AArch64/recp-fastmath.ll @@ -164,5 +164,5 @@ define <4 x double> @d4recp1(<4 x double> %x) #1 { ; CHECK-NOT: frecps {{v[0-7]\.2d}}, {{v[0-7]\.2d}}, {{v[0-7]\.2d}} } 
-attributes #0 = { nounwind "unsafe-fp-math"="true" } -attributes #1 = { nounwind "unsafe-fp-math"="true" "reciprocal-estimates"="div,vec-div" } +attributes #0 = { nounwind } +attributes #1 = { nounwind "reciprocal-estimates"="div,vec-div" } diff --git a/llvm/test/CodeGen/AArch64/shrink-wrap-const-pool-access.mir b/llvm/test/CodeGen/AArch64/shrink-wrap-const-pool-access.mir new file mode 100644 index 0000000..6f33a75 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/shrink-wrap-const-pool-access.mir @@ -0,0 +1,76 @@ +# RUN: llc -mtriple=aarch64 -simplify-mir -run-pass=shrink-wrap -o - %s | FileCheck %s +--- | + declare double @foo() + + define double @shrink_wrap_load_from_const_pool(double %q) { + entry: + %0 = fcmp oeq double %q, 3.125500e+02 + br i1 %0, label %common.ret, label %if.else + + common.ret: ; preds = %if.else, %entry, %exit1 + %common.ret.op = phi double [ %3, %exit1 ], [ 0.000000e+00, %entry ], [ 0.000000e+00, %if.else ] + ret double %common.ret.op + + if.else: ; preds = %entry + %1 = call double @foo() + %2 = fcmp oeq double %1, 0.000000e+00 + br i1 %2, label %exit1, label %common.ret + + exit1: ; preds = %if.else + %3 = call double @foo() + br label %common.ret + } +... +# Following code has a load from constant pool. Accessing constant pool +# must not be considered as a stack access and hence, shrink wrapping must +# happen. +# CHECK-LABEL:name: shrink_wrap_load_from_const_pool +# CHECK: savePoint: +# CHECK: - point: '%bb.3' +# CHECK: restorePoint: +# CHECK: - point: '%bb.5' +--- +name: shrink_wrap_load_from_const_pool +tracksRegLiveness: true +constants: + - id: 0 + value: 'double 3.125500e+02' + alignment: 8 +body: | + bb.0.entry: + successors: %bb.4(0x50000000), %bb.2(0x30000000) + liveins: $d0 + + renamable $d1 = COPY $d0 + renamable $x8 = ADRP target-flags(aarch64-page) %const.0 + renamable $d2 = LDRDui killed renamable $x8, target-flags(aarch64-pageoff, aarch64-nc) %const.0 :: (load (s64) from constant-pool) + renamable $d0 = FMOVD0 + nofpexcept FCMPDrr killed renamable $d1, killed renamable $d2, implicit-def $nzcv, implicit $fpcr + Bcc 1, %bb.2, implicit killed $nzcv + + bb.4: + liveins: $d0 + + bb.1.common.ret: + liveins: $d0 + + RET_ReallyLR implicit $d0 + + bb.2.if.else: + successors: %bb.3(0x50000000), %bb.1(0x30000000) + + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + BL @foo, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $d0 + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + renamable $d1 = COPY $d0 + renamable $d0 = FMOVD0 + nofpexcept FCMPDri killed renamable $d1, implicit-def $nzcv, implicit $fpcr + Bcc 1, %bb.1, implicit killed $nzcv + B %bb.3 + + bb.3.exit1: + ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp + BL @foo, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $d0 + ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp + B %bb.1 +... 
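For readers skimming the shrink-wrap test added above, a rough C analogue of the control flow it encodes is sketched below. This is an illustration under stated assumptions, not part of the patch: the test itself is written directly as LLVM IR/MIR, and only the function and callee names (shrink_wrap_load_from_const_pool, foo) are taken from it. The sketch shows why the entry block needs no frame: it only compares the argument against 312.55, a constant the backend materializes with a constant-pool load, which must not count as a stack access.

/* Hypothetical C shape of the MIR test above. Only the paths that call
 * foo() need LR saved, so shrink wrapping should place the save/restore
 * points after the early return, despite the constant-pool load in entry. */
extern double foo(void);

double shrink_wrap_load_from_const_pool(double q) {
    if (q == 312.55)      /* fcmp against a constant-pool constant; no frame */
        return 0.0;
    if (foo() == 0.0)     /* calls clobber LR, so a frame is needed here */
        return foo();
    return 0.0;
}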
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-ex-1.ll b/llvm/test/CodeGen/AArch64/stack-tagging-ex-1.ll index 66ac04e..22abb8c 100644 --- a/llvm/test/CodeGen/AArch64/stack-tagging-ex-1.ll +++ b/llvm/test/CodeGen/AArch64/stack-tagging-ex-1.ll @@ -64,6 +64,6 @@ declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1 ; Function Attrs: argmemonly nounwind willreturn declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1 -attributes #0 = { sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "use-soft-float"="false" } attributes #1 = { argmemonly nounwind willreturn } attributes #2 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-ex-2.ll b/llvm/test/CodeGen/AArch64/stack-tagging-ex-2.ll index e5725bc..d689a76 100644 --- a/llvm/test/CodeGen/AArch64/stack-tagging-ex-2.ll +++ b/llvm/test/CodeGen/AArch64/stack-tagging-ex-2.ll @@ -158,10 +158,10 @@ eh.resume: ; preds = %lpad.body resume { ptr, i32 } %eh.lpad-body } -attributes #0 = { noreturn sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { noreturn sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "use-soft-float"="false" } attributes #1 = { argmemonly nounwind willreturn } attributes #2 = { nounwind readnone } -attributes #3 = { norecurse sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #3 = { norecurse sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" 
"less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "use-soft-float"="false" } attributes #4 = { nounwind } attributes #5 = { noreturn } diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll index 91adf82..7483622 100644 --- a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll +++ b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll @@ -77,6 +77,6 @@ declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare void @llvm.lifetime.end.p0(ptr nocapture) #1 -attributes #0 = { sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "use-soft-float"="false" } attributes #1 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/stack_guard_remat.ll b/llvm/test/CodeGen/AArch64/stack_guard_remat.ll index 523eda61..e41d82c 100644 --- a/llvm/test/CodeGen/AArch64/stack_guard_remat.ll +++ b/llvm/test/CodeGen/AArch64/stack_guard_remat.ll @@ -54,7 +54,7 @@ declare void @foo3(ptr) ; Function Attrs: nounwind declare void @llvm.lifetime.end.p0(i64, ptr nocapture) -attributes #0 = { nounwind sspstrong "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { nounwind sspstrong "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" } ;--- pic.ll !llvm.module.flags = !{!0} diff --git a/llvm/test/CodeGen/AArch64/svtcf-fmul-fdiv-combine.ll b/llvm/test/CodeGen/AArch64/svtcf-fmul-fdiv-combine.ll index f78fcea..b8dcd6f 100644 --- a/llvm/test/CodeGen/AArch64/svtcf-fmul-fdiv-combine.ll +++ b/llvm/test/CodeGen/AArch64/svtcf-fmul-fdiv-combine.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 -; RUN: llc -mtriple aarch64-none-linux-gnu -enable-unsafe-fp-math -mattr=+fullfp16 < %s | FileCheck %s +; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+fullfp16 < %s | FileCheck %s define half @scvtf_f16_2(i32 %state) { ; CHECK-LABEL: scvtf_f16_2: diff --git a/llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll b/llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll index 623ea22..89b3b89 100644 --- a/llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll +++ 
b/llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll @@ -24,7 +24,7 @@ define void @fn(ptr %argA, ptr %argB, ptr %a) #0 align 2 { ; CHECK: ret -attributes #0 = { noinline norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "polly-optimized" "stack-protector-buffer-size"="8" "target-features"="+crc,+crypto,+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { noinline norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "polly-optimized" "stack-protector-buffer-size"="8" "target-features"="+crc,+crypto,+neon" "use-soft-float"="false" } !llvm.ident = !{!0} diff --git a/llvm/test/CodeGen/AArch64/wineh-frame5.mir b/llvm/test/CodeGen/AArch64/wineh-frame5.mir index 97c5c85..32580f4 100644 --- a/llvm/test/CodeGen/AArch64/wineh-frame5.mir +++ b/llvm/test/CodeGen/AArch64/wineh-frame5.mir @@ -64,9 +64,9 @@ ; Function Attrs: nounwind declare void @llvm.stackprotector(ptr, ptr) #3 - attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } + attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" } attributes #1 = { argmemonly nounwind } - attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } + attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" } attributes #3 = { nounwind } ... 
diff --git a/llvm/test/CodeGen/AArch64/wineh-frame6.mir b/llvm/test/CodeGen/AArch64/wineh-frame6.mir index 5ba7842..d76fae1 100644 --- a/llvm/test/CodeGen/AArch64/wineh-frame6.mir +++ b/llvm/test/CodeGen/AArch64/wineh-frame6.mir @@ -47,8 +47,8 @@ ; Function Attrs: nounwind declare void @llvm.stackprotector(ptr, ptr) #2 - attributes #0 = { noinline optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } - attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } + attributes #0 = { noinline optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" } + attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" } attributes #2 = { nounwind } ... 
diff --git a/llvm/test/CodeGen/AArch64/wineh-frame7.mir b/llvm/test/CodeGen/AArch64/wineh-frame7.mir index 1599098..d4e71d9 100644 --- a/llvm/test/CodeGen/AArch64/wineh-frame7.mir +++ b/llvm/test/CodeGen/AArch64/wineh-frame7.mir @@ -71,8 +71,8 @@ ; Function Attrs: nounwind declare void @llvm.stackprotector(ptr, ptr) #2 - attributes #0 = { noinline optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } - attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } + attributes #0 = { noinline optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" } + attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" } attributes #2 = { nounwind } ... diff --git a/llvm/test/CodeGen/AArch64/wineh-frame8.mir b/llvm/test/CodeGen/AArch64/wineh-frame8.mir index 9de99ac..56f92f2 100644 --- a/llvm/test/CodeGen/AArch64/wineh-frame8.mir +++ b/llvm/test/CodeGen/AArch64/wineh-frame8.mir @@ -29,7 +29,7 @@ ret i32 %add } - attributes #0 = { noinline nounwind optnone uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } + attributes #0 = { noinline nounwind optnone uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" } ... 
--- diff --git a/llvm/test/CodeGen/AArch64/wineh5.mir b/llvm/test/CodeGen/AArch64/wineh5.mir index efdd4b0..1c09b78 100644 --- a/llvm/test/CodeGen/AArch64/wineh5.mir +++ b/llvm/test/CodeGen/AArch64/wineh5.mir @@ -73,8 +73,8 @@ ; Function Attrs: nounwind declare void @llvm.stackprotector(ptr, ptr) #2 - attributes #0 = { noinline optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } - attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } + attributes #0 = { noinline optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" } + attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" } attributes #2 = { nounwind } !llvm.module.flags = !{!0} diff --git a/llvm/test/CodeGen/AArch64/wineh_shrinkwrap.mir b/llvm/test/CodeGen/AArch64/wineh_shrinkwrap.mir index 2f631c2..52d0dff 100644 --- a/llvm/test/CodeGen/AArch64/wineh_shrinkwrap.mir +++ b/llvm/test/CodeGen/AArch64/wineh_shrinkwrap.mir @@ -56,9 +56,9 @@ ; Function Attrs: nounwind declare void @llvm.stackprotector(ptr, ptr) #3 - attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } + attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" } attributes #1 = { argmemonly nounwind } - attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" 
"target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" } + attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" } attributes #3 = { nounwind } !llvm.module.flags = !{!0} diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-unmerge-values.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-unmerge-values.mir index d9ac9a7..de1bb47 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-unmerge-values.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-unmerge-values.mir @@ -1,5 +1,5 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -enable-unsafe-fp-math -run-pass=amdgpu-prelegalizer-combiner %s -o - | FileCheck -check-prefix=GFX10 %s +# RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -run-pass=amdgpu-prelegalizer-combiner %s -o - | FileCheck -check-prefix=GFX10 %s # Test that we fold correct element from G_UNMERGE_VALUES into fma diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fract.f64.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fract.f64.mir index 52b1beb..91f2f6f1 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fract.f64.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fract.f64.mir @@ -1,6 +1,6 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -enable-unsafe-fp-math -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=GFX10 -# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -enable-unsafe-fp-math -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=GFX11 +# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=GFX10 +# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=GFX11 --- name: fract_f64_neg diff --git a/llvm/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll b/llvm/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll index 13206ad..f45070c 100644 --- a/llvm/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll +++ b/llvm/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll @@ -1,9 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn -mattr=+fast-fmaf,+mad-mac-f32-insts -denormal-fp-math-f32=preserve-sign -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-FLUSH %s -; RUN: llc -mtriple=amdgcn -mattr=-fast-fmaf,+mad-mac-f32-insts -denormal-fp-math-f32=preserve-sign -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-FLUSH %s +; RUN: llc -mtriple=amdgcn -mattr=+fast-fmaf,+mad-mac-f32-insts -denormal-fp-math-f32=preserve-sign < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-FLUSH %s +; RUN: llc -mtriple=amdgcn -mattr=-fast-fmaf,+mad-mac-f32-insts -denormal-fp-math-f32=preserve-sign < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-FLUSH %s -; RUN: llc -mtriple=amdgcn -mattr=+fast-fmaf,+mad-mac-f32-insts -denormal-fp-math-f32=ieee -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-FASTFMA %s -; RUN: llc 
-mtriple=amdgcn -mattr=-fast-fmaf,+mad-mac-f32-insts -denormal-fp-math-f32=ieee -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-SLOWFMA %s +; RUN: llc -mtriple=amdgcn -mattr=+fast-fmaf,+mad-mac-f32-insts -denormal-fp-math-f32=ieee < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-FASTFMA %s +; RUN: llc -mtriple=amdgcn -mattr=-fast-fmaf,+mad-mac-f32-insts -denormal-fp-math-f32=ieee < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-SLOWFMA %s ; FIXME: This should also fold when fma is actually fast if an FMA ; exists in the original program. diff --git a/llvm/test/CodeGen/AMDGPU/fpext.f16.ll b/llvm/test/CodeGen/AMDGPU/fpext.f16.ll index d41e2c6..8df7564 100644 --- a/llvm/test/CodeGen/AMDGPU/fpext.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/fpext.f16.ll @@ -1,9 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -enable-unsafe-fp-math < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=SI %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=GFX89,VI %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=GFX89,GFX9 %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=GFX11-TRUE16 %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=GFX11-FAKE16 %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=SI %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=GFX89,VI %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx900 -mattr=-flat-for-global < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=GFX89,GFX9 %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -mattr=-flat-for-global < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=GFX11-TRUE16 %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -mattr=-flat-for-global < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=GFX11-FAKE16 %s define amdgpu_kernel void @fpext_f16_to_f32( ; SI-LABEL: fpext_f16_to_f32: diff --git a/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll b/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll index a43292d..a043d53 100644 --- a/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=SI %s -; 
RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=VI %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn < %s | FileCheck -check-prefixes=SI %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global < %s | FileCheck -check-prefixes=VI %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s define amdgpu_kernel void @fptosi_f16_to_i16( diff --git a/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll b/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll index 96cb621..af1ab37 100644 --- a/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=SI %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=VI %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefixes=SI %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global < %s | FileCheck -check-prefixes=VI %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s define amdgpu_kernel void @fptoui_f16_to_i16( diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.ll index 5d31177..f1165491 100644 --- a/llvm/test/CodeGen/AMDGPU/fptrunc.ll +++ b/llvm/test/CodeGen/AMDGPU/fptrunc.ll @@ -2,14 +2,14 @@ ; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefixes=SI %s ; RUN: llc -mtriple=amdgcn -mcpu=tonga -global-isel=0 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=VI-SDAG,VI-SAFE-SDAG %s ; RUN: llc -mtriple=amdgcn -mcpu=tonga -global-isel=1 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=VI-GISEL,VI-SAFE-GISEL %s -; RUN: llc -mtriple=amdgcn -mcpu=tonga -global-isel=0 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck 
-check-prefixes=VI-SDAG,VI-UNSAFE-SDAG %s +; RUN: llc -mtriple=amdgcn -mcpu=tonga -global-isel=0 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=VI-SDAG,VI-UNSAFE-SDAG %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -global-isel=0 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX10-SDAG,GFX10-SAFE-SDAG %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -global-isel=1 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX10-GISEL,GFX10-SAFE-GISEL %s -; RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -global-isel=0 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX10-SDAG,GFX10-UNSAFE-SDAG %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -global-isel=0 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX10-SDAG,GFX10-UNSAFE-SDAG %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX11-SDAG,GFX11-SAFE-SDAG %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX11-GISEL,GFX11-SAFE-GISEL %s -; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=-flat-for-global,+real-true16 -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-SDAG,GFX11-UNSAFE-DAG-TRUE16 %s -; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=-flat-for-global,-real-true16 -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-SDAG,GFX11-UNSAFE-DAG-FAKE16 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=-flat-for-global,+real-true16 < %s | FileCheck -check-prefixes=GFX11-SDAG,GFX11-UNSAFE-DAG-TRUE16 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=-flat-for-global,-real-true16 < %s | FileCheck -check-prefixes=GFX11-SDAG,GFX11-UNSAFE-DAG-FAKE16 %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=-flat-for-global,+real-true16 < %s | FileCheck -check-prefixes=GFX11-GISEL,GFX11-UNSAFE-GISEL-TRUE16 %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=-flat-for-global,-real-true16 < %s | FileCheck -check-prefixes=GFX11-GISEL,GFX11-UNSAFE-GISEL-FAKE16 %s diff --git a/llvm/test/CodeGen/AMDGPU/fract.f64.ll b/llvm/test/CodeGen/AMDGPU/fract.f64.ll index f09c1c6..cc2e78d 100644 --- a/llvm/test/CodeGen/AMDGPU/fract.f64.ll +++ b/llvm/test/CodeGen/AMDGPU/fract.f64.ll @@ -2,8 +2,8 @@ ; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=bonaire < %s | FileCheck --check-prefixes=GCN,CI,FUNC %s ; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck --check-prefixes=GCN,CI,FUNC %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -enable-unsafe-fp-math < %s | FileCheck --check-prefixes=GCN,SI,FUNC %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck --check-prefixes=GCN,CI,FUNC %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn < %s | FileCheck --check-prefixes=GCN,SI,FUNC %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck --check-prefixes=GCN,CI,FUNC %s declare double @llvm.fabs.f64(double) #0 declare double @llvm.floor.f64(double) #0 diff --git a/llvm/test/CodeGen/AMDGPU/fract.ll b/llvm/test/CodeGen/AMDGPU/fract.ll index 8ef0fcf..723fd93 100644 --- a/llvm/test/CodeGen/AMDGPU/fract.ll +++ b/llvm/test/CodeGen/AMDGPU/fract.ll @@ -1,8 +1,8 @@ ; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn < %s | 
FileCheck --check-prefix=GCN %s ; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=bonaire < %s | FileCheck --check-prefix=GCN %s ; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck --check-prefix=GCN %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -enable-unsafe-fp-math < %s | FileCheck --check-prefix=GCN %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck --check-prefix=GCN %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn < %s | FileCheck --check-prefix=GCN %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck --check-prefix=GCN %s declare float @llvm.fabs.f32(float) #0 declare float @llvm.floor.f32(float) #0 diff --git a/llvm/test/CodeGen/AMDGPU/inline-attr.ll b/llvm/test/CodeGen/AMDGPU/inline-attr.ll index 4ae0ba0..4e93eca 100644 --- a/llvm/test/CodeGen/AMDGPU/inline-attr.ll +++ b/llvm/test/CodeGen/AMDGPU/inline-attr.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 5 -; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -O3 -enable-unsafe-fp-math %s | FileCheck --check-prefixes=GCN,UNSAFE %s +; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -O3 %s | FileCheck --check-prefixes=GCN,UNSAFE %s ; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -O3 -enable-no-nans-fp-math %s | FileCheck --check-prefixes=GCN,NONANS %s ; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -O3 -enable-no-infs-fp-math %s | FileCheck --check-prefixes=GCN,NOINFS %s diff --git a/llvm/test/CodeGen/AMDGPU/prevent-fmul-hoist-ir.ll b/llvm/test/CodeGen/AMDGPU/prevent-fmul-hoist-ir.ll index ef3e04c..6ce614b 100644 --- a/llvm/test/CodeGen/AMDGPU/prevent-fmul-hoist-ir.ll +++ b/llvm/test/CodeGen/AMDGPU/prevent-fmul-hoist-ir.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; RUN: opt -S -passes='simplifycfg<hoist-common-insts>' -mtriple=amdgcn-- --fp-contract=fast -mcpu=gfx1030 < %s | FileCheck -check-prefix=GFX -check-prefix=FP-CONTRACT-FAST %s -; RUN: opt -S -passes='simplifycfg<hoist-common-insts>' -mtriple=amdgcn-- --fp-contract=off --enable-unsafe-fp-math -mcpu=gfx1030 < %s | FileCheck -check-prefix=GFX -check-prefix=UNSAFE-FP-MATH %s +; RUN: opt -S -passes='simplifycfg<hoist-common-insts>' -mtriple=amdgcn-- --fp-contract=off -mcpu=gfx1030 < %s | FileCheck -check-prefix=GFX -check-prefix=UNSAFE-FP-MATH %s ; RUN: opt -S -passes='simplifycfg<hoist-common-insts>' -mtriple=amdgcn-- --fp-contract=off -mcpu=gfx1030 < %s | FileCheck -check-prefix=GFX -check-prefix=NO-UNSAFE-FP-MATH %s define double @is_profitable_f64_contract(ptr dereferenceable(8) %ptr_x, ptr dereferenceable(8) %ptr_y, ptr dereferenceable(8) %ptr_a) #0 { diff --git a/llvm/test/CodeGen/AMDGPU/sitofp.f16.ll b/llvm/test/CodeGen/AMDGPU/sitofp.f16.ll index 09596e9..7ddd90e 100644 --- a/llvm/test/CodeGen/AMDGPU/sitofp.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/sitofp.f16.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=SI %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=VI %s -; 
RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,+real-true16 -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,-real-true16 -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefixes=SI %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global < %s | FileCheck -check-prefixes=VI %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,+real-true16 < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,-real-true16 < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s define amdgpu_kernel void @sitofp_i16_to_f16( ; SI-LABEL: sitofp_i16_to_f16: diff --git a/llvm/test/CodeGen/AMDGPU/uitofp.f16.ll b/llvm/test/CodeGen/AMDGPU/uitofp.f16.ll index 9bcba6c..2d7ce10 100644 --- a/llvm/test/CodeGen/AMDGPU/uitofp.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/uitofp.f16.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=SI %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=VI %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,+real-true16 -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s -; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,-real-true16 -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefixes=SI %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global < %s | FileCheck -check-prefixes=VI %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,+real-true16 < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s +; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,-real-true16 < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s define amdgpu_kernel void @uitofp_i16_to_f16( ; SI-LABEL: uitofp_i16_to_f16: diff --git a/llvm/test/CodeGen/Hexagon/swp-phi.ll b/llvm/test/CodeGen/Hexagon/swp-phi.ll index 9b0e126..6ce2481 100644 --- a/llvm/test/CodeGen/Hexagon/swp-phi.ll +++ b/llvm/test/CodeGen/Hexagon/swp-phi.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=hexagon -enable-unsafe-fp-math -enable-pipeliner \ +; RUN: llc -mtriple=hexagon -enable-pipeliner \ ; RUN: -pipeliner-prune-deps=false -stats -o /dev/null < %s ; REQUIRES: asserts diff --git a/llvm/test/CodeGen/NVPTX/fma-assoc.ll b/llvm/test/CodeGen/NVPTX/fma-assoc.ll index 6693c90..db0eae7 100644 --- a/llvm/test/CodeGen/NVPTX/fma-assoc.ll +++ b/llvm/test/CodeGen/NVPTX/fma-assoc.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_20 -fp-contract=fast | FileCheck %s -check-prefix=CHECK -; RUN: 
llc < %s -mtriple=nvptx64 -mcpu=sm_20 -fp-contract=fast -enable-unsafe-fp-math | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-UNSAFE +; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_20 -fp-contract=fast | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-UNSAFE +; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 -mcpu=sm_20 -fp-contract=fast | %ptxas-verify %} ; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 -mcpu=sm_20 -fp-contract=fast | %ptxas-verify %} -; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 -mcpu=sm_20 -fp-contract=fast -enable-unsafe-fp-math | %ptxas-verify %} define ptx_device float @t1_f32(float %x, float %y, float %z, ; CHECK-UNSAFE-LABEL: t1_f32( diff --git a/llvm/test/CodeGen/PowerPC/fmf-propagation.ll b/llvm/test/CodeGen/PowerPC/fmf-propagation.ll index cad684e..baa127e 100644 --- a/llvm/test/CodeGen/PowerPC/fmf-propagation.ll +++ b/llvm/test/CodeGen/PowerPC/fmf-propagation.ll @@ -2,8 +2,8 @@ ; REQUIRES: asserts ; RUN: llc < %s -mtriple=powerpc64le -debug-only=isel -o /dev/null 2>&1 | FileCheck %s --check-prefix=FMFDEBUG ; RUN: llc < %s -mtriple=powerpc64le | FileCheck %s --check-prefix=FMF -; RUN: llc < %s -mtriple=powerpc64le -debug-only=isel -o /dev/null 2>&1 -enable-unsafe-fp-math -fp-contract=fast -enable-no-nans-fp-math | FileCheck %s --check-prefix=GLOBALDEBUG -; RUN: llc < %s -mtriple=powerpc64le -enable-unsafe-fp-math -fp-contract=fast -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math | FileCheck %s --check-prefix=GLOBAL +; RUN: llc < %s -mtriple=powerpc64le -debug-only=isel -o /dev/null 2>&1 -fp-contract=fast -enable-no-nans-fp-math | FileCheck %s --check-prefix=GLOBALDEBUG +; RUN: llc < %s -mtriple=powerpc64le -fp-contract=fast -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math | FileCheck %s --check-prefix=GLOBAL ; Test FP transforms using instruction/node-level fast-math-flags. ; We're also checking debug output to verify that FMF is propagated to the newly created nodes. 
diff --git a/llvm/test/CodeGen/PowerPC/scalar-equal.ll b/llvm/test/CodeGen/PowerPC/scalar-equal.ll index 1832475..c0b11b4 100644 --- a/llvm/test/CodeGen/PowerPC/scalar-equal.ll +++ b/llvm/test/CodeGen/PowerPC/scalar-equal.ll @@ -1,10 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names --enable-unsafe-fp-math \ +; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names \ ; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \ ; RUN: --enable-no-nans-fp-math --enable-no-infs-fp-math \ ; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \ ; RUN: --check-prefix=FAST-P8 -; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names --enable-unsafe-fp-math \ +; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names \ ; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \ ; RUN: --enable-no-nans-fp-math --enable-no-infs-fp-math \ ; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \ diff --git a/llvm/test/CodeGen/PowerPC/scalar-min-max-p10.ll b/llvm/test/CodeGen/PowerPC/scalar-min-max-p10.ll index ca9baceb..5915bd3 100644 --- a/llvm/test/CodeGen/PowerPC/scalar-min-max-p10.ll +++ b/llvm/test/CodeGen/PowerPC/scalar-min-max-p10.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mcpu=pwr10 -ppc-asm-full-reg-names --enable-unsafe-fp-math \ +; RUN: llc -mcpu=pwr10 -ppc-asm-full-reg-names \ ; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \ ; RUN: --enable-no-nans-fp-math \ ; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s diff --git a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll index fd0b494..881d1f4 100644 --- a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll +++ b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll @@ -1,10 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names --enable-unsafe-fp-math \ +; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names \ ; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \ ; RUN: --enable-no-nans-fp-math --enable-no-infs-fp-math \ ; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \ ; RUN: --check-prefix=FAST-P8 -; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names --enable-unsafe-fp-math \ +; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names \ ; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \ ; RUN: --enable-no-nans-fp-math --enable-no-infs-fp-math \ ; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \ diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll index 693a40d..5e5f2b7 100644 --- a/llvm/test/CodeGen/RISCV/features-info.ll +++ b/llvm/test/CodeGen/RISCV/features-info.ll @@ -217,6 +217,11 @@ ; CHECK-NEXT: xsfmm64t - 'XSfmm64t' (TE=64 configuration). ; CHECK-NEXT: xsfmmbase - 'XSfmmbase' (All non arithmetic instructions for all TEWs and sf.vtzero). ; CHECK-NEXT: xsfvcp - 'XSfvcp' (SiFive Custom Vector Coprocessor Interface Instructions). +; CHECK-NEXT: xsfvfbfexp16e - 'XSfvfbfexp16e' (SiFive Vector Floating-Point Exponential Function Instruction, BFloat16). +; CHECK-NEXT: xsfvfexp16e - 'XSfvfexp16e' (SiFive Vector Floating-Point Exponential Function Instruction, Half Precision). +; CHECK-NEXT: xsfvfexp32e - 'XSfvfexp32e' (SiFive Vector Floating-Point Exponential Function Instruction, Single Precision). +; CHECK-NEXT: xsfvfexpa - 'XSfvfexpa' (SiFive Vector Floating-Point Exponential Approximation Instruction). 
+; CHECK-NEXT: xsfvfexpa64e - 'XSfvfexpa64e' (SiFive Vector Floating-Point Exponential Approximation Instruction with Double-Precision). ; CHECK-NEXT: xsfvfnrclipxfqf - 'XSfvfnrclipxfqf' (SiFive FP32-to-int8 Ranged Clip Instructions). ; CHECK-NEXT: xsfvfwmaccqqq - 'XSfvfwmaccqqq' (SiFive Matrix Multiply Accumulate Instruction (4-by-4)). ; CHECK-NEXT: xsfvqmaccdod - 'XSfvqmaccdod' (SiFive Int8 Matrix Multiplication Instructions (2-by-8 and 8-by-2)). diff --git a/llvm/test/CodeGen/SystemZ/fp-sincos-01.ll b/llvm/test/CodeGen/SystemZ/fp-sincos-01.ll index 4a38d7a..c87f113 100644 --- a/llvm/test/CodeGen/SystemZ/fp-sincos-01.ll +++ b/llvm/test/CodeGen/SystemZ/fp-sincos-01.ll @@ -1,7 +1,7 @@ ; Test that combined sin/cos library call is emitted when appropriate ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s --check-prefix=CHECK-OPT -; RUN: llc < %s -mtriple=s390x-linux-gnu -enable-unsafe-fp-math | FileCheck %s --check-prefix=CHECK-OPT +; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s --check-prefix=CHECK-OPT define float @f1(float %x) { ; CHECK-OPT-LABEL: f1: diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-vs-unpredicated-copy.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-vs-unpredicated-copy.mir new file mode 100644 index 0000000..5783133 --- /dev/null +++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-vs-unpredicated-copy.mir @@ -0,0 +1,146 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6 +# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -verify-machineinstrs -o - | FileCheck %s + +# From bug #162644. The _wrong_ output of this test is to generate the +# body of the tail-predicated loop like this: +# +# $q2 = MVE_VORR killed $q0, killed $q0, 0, $noreg, $noreg, undef $q2 +# renamable $r0, renamable $q3 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, renamable $lr :: (load unknown-size from %ir.13, align 4) +# $q0 = MVE_VORR $q1, $q1, 0, $noreg, $noreg, undef $q0 +# renamable $q0 = MVE_VADDf32 killed renamable $q2, killed renamable $q3, 0, killed $noreg, renamable $lr, killed renamable $q0 +# $lr = MVE_LETP killed renamable $lr, %bb.1 +# +# in which the second MVE_VORR, copying q1 into q0, is an invalid conversion of +# the input MQPRCopy, because it won't copy the vector lanes disabled by +# FPSCR.LTPSIZE, and those are needed in the output value of the loop. +# +# In the right output, that MQPRCopy is expanded into a pair of VMOVD copying +# d2,d3 into d0,d1 respectively, which are unaffected by LTPSIZE. 
+ +--- | + target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" + target triple = "thumbv8.1m.main-unknown-none-eabihf" + + @inactive = dso_local local_unnamed_addr global <4 x float> zeroinitializer, align 16 + + define <4 x float> @test_func(ptr %0, i32 %1) { + %3 = load <4 x float>, ptr @inactive, align 16 + %4 = add i32 %1, 3 + %5 = call i32 @llvm.smin.i32(i32 %1, i32 4) + %6 = sub i32 %4, %5 + %7 = lshr i32 %6, 2 + %8 = add nuw nsw i32 %7, 1 + %9 = call i32 @llvm.start.loop.iterations.i32(i32 %8) + br label %10 + + 10: ; preds = %10, %2 + %11 = phi <4 x float> [ splat (float 0x3FB99999A0000000), %2 ], [ %17, %10 ] + %12 = phi i32 [ %1, %2 ], [ %19, %10 ] + %13 = phi ptr [ %0, %2 ], [ %18, %10 ] + %14 = phi i32 [ %9, %2 ], [ %20, %10 ] + %15 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %12) + %16 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %13, i32 4, <4 x i1> %15, <4 x float> zeroinitializer) + %17 = tail call <4 x float> @llvm.arm.mve.add.predicated.v4f32.v4i1(<4 x float> %11, <4 x float> %16, <4 x i1> %15, <4 x float> %3) + %18 = getelementptr inbounds nuw i8, ptr %13, i32 16 + %19 = add i32 %12, -4 + %20 = call i32 @llvm.loop.decrement.reg.i32(i32 %14, i32 1) + %21 = icmp ne i32 %20, 0 + br i1 %21, label %10, label %22 + + 22: ; preds = %10 + ret <4 x float> %17 + } +... +--- +name: test_func +alignment: 4 +legalized: false +tracksRegLiveness: true +registers: [] +liveins: + - { reg: '$r0', virtual-reg: '' } + - { reg: '$r1', virtual-reg: '' } +stack: + - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +body: | + ; CHECK-LABEL: name: test_func + ; CHECK: bb.0 (%ir-block.2): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r7 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8 + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4 + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8 + ; CHECK-NEXT: $r2 = t2MOVi16 target-flags(arm-lo16) @inactive, 14 /* CC::al */, $noreg + ; CHECK-NEXT: $r2 = t2MOVTi16 killed $r2, target-flags(arm-hi16) @inactive, 14 /* CC::al */, $noreg + ; CHECK-NEXT: renamable $q1 = MVE_VLDRWU32 killed renamable $r2, 0, 0, $noreg, $noreg :: (dereferenceable load (s128) from @inactive) + ; CHECK-NEXT: $r3 = t2MOVi16 52429, 14 /* CC::al */, $noreg + ; CHECK-NEXT: $r3 = t2MOVTi16 killed $r3, 15820, 14 /* CC::al */, $noreg + ; CHECK-NEXT: renamable $q0 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q0 + ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1 (%ir-block.10, align 4): + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $lr, $d2, $d3, $q0, $r0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $q2 = MVE_VORR killed $q0, killed $q0, 0, $noreg, $noreg, undef $q2 + ; CHECK-NEXT: renamable $r0, renamable $q3 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, renamable $lr :: (load unknown-size from %ir.13, align 4) + ; 
CHECK-NEXT: $d0 = VMOVD $d2, 14 /* CC::al */, $noreg + ; CHECK-NEXT: $d1 = VMOVD $d3, 14 /* CC::al */, $noreg + ; CHECK-NEXT: renamable $q0 = MVE_VADDf32 killed renamable $q2, killed renamable $q3, 0, killed $noreg, renamable $lr, killed renamable $q0 + ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2 (%ir-block.22): + ; CHECK-NEXT: liveins: $q0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $q0 + bb.0 (%ir-block.2): + successors: %bb.1(0x80000000) + liveins: $r0, $r1, $r7, $lr + + frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp + frame-setup CFI_INSTRUCTION def_cfa_offset 8 + frame-setup CFI_INSTRUCTION offset $lr, -4 + frame-setup CFI_INSTRUCTION offset $r7, -8 + $r2 = t2MOVi16 target-flags(arm-lo16) @inactive, 14 /* CC::al */, $noreg + tCMPi8 renamable $r1, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr + $r2 = t2MOVTi16 killed $r2, target-flags(arm-hi16) @inactive, 14 /* CC::al */, $noreg + renamable $r3 = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg + renamable $q1 = MVE_VLDRWU32 killed renamable $r2, 0, 0, $noreg, $noreg :: (dereferenceable load (s128) from @inactive) + $r2 = tMOVr $r1, 14 /* CC::al */, $noreg + t2IT 10, 8, implicit-def $itstate + renamable $r2 = tMOVi8 $noreg, 4, 10 /* CC::ge */, killed $cpsr, implicit killed renamable $r2, implicit killed $itstate + renamable $r2, dead $cpsr = tSUBrr renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg + renamable $r2, dead $cpsr = tADDi8 killed renamable $r2, 3, 14 /* CC::al */, $noreg + renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg + $r3 = t2MOVi16 52429, 14 /* CC::al */, $noreg + $r3 = t2MOVTi16 killed $r3, 15820, 14 /* CC::al */, $noreg + renamable $q0 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q0 + renamable $lr = t2DoLoopStartTP killed renamable $r2, renamable $r1 + + bb.1 (%ir-block.10, align 4): + successors: %bb.1(0x7c000000), %bb.2(0x04000000) + liveins: $lr, $q0, $q1, $r0, $r1 + + renamable $vpr = MVE_VCTP32 renamable $r1, 0, $noreg, $noreg + $q2 = MQPRCopy killed $q0 + MVE_VPST 8, implicit $vpr + renamable $r0, renamable $q3 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr, renamable $lr :: (load unknown-size from %ir.13, align 4) + $q0 = MQPRCopy $q1 + MVE_VPST 8, implicit $vpr + renamable $q0 = MVE_VADDf32 killed renamable $q2, killed renamable $q3, 1, killed renamable $vpr, renamable $lr, killed renamable $q0 + renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg + renamable $lr = t2LoopEndDec killed renamable $lr, %bb.1, implicit-def dead $cpsr + tB %bb.2, 14 /* CC::al */, $noreg + + bb.2 (%ir-block.22): + liveins: $q0 + + frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $q0 +... 
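
The comment at the top of the new MIR test above explains why the low-overhead-loops pass must expand the MQPRCopy into a pair of VMOVD instructions instead of an MVE_VORR: inside a tail-predicated loop the final iteration disables some lanes via FPSCR.LTPSIZE, so an MVE register-to-register VORR leaves the disabled lanes of the destination untouched, while d-register VMOVD moves copy all lanes unconditionally. Below is a minimal, self-contained C++ sketch of those lane semantics, for illustration only under that reading of the test comment; the types and function names (Vec4f, predicated_vorr_copy, vmovd_pair_copy) are invented for this sketch and are not LLVM or MVE intrinsics code.

// Model of the lane semantics discussed in the test comment above.
// All names here are hypothetical; this is not LLVM or ARM MVE code.
#include <array>
#include <cassert>

using Vec4f = std::array<float, 4>;
using Pred4 = std::array<bool, 4>;

// An MVE instruction in a tail-predicated loop only writes the lanes
// enabled by the loop-tail predicate; disabled lanes keep whatever was
// in the destination register before.
Vec4f predicated_vorr_copy(Vec4f dst, const Vec4f &src, const Pred4 &p) {
  for (int i = 0; i < 4; ++i)
    if (p[i])
      dst[i] = src[i];
  return dst;
}

// A pair of VMOVD copies moves both 64-bit halves of the q register and
// is unaffected by FPSCR.LTPSIZE, so every lane is copied.
Vec4f vmovd_pair_copy(const Vec4f &src) {
  return src; // all four lanes copied unconditionally
}

int main() {
  Vec4f q1 = {1, 2, 3, 4};                 // value that must survive the copy
  Vec4f q0 = {9, 9, 9, 9};                 // stale destination contents
  Pred4 tail = {true, true, false, false}; // final iteration: two lanes off

  // Wrong lowering: the copy itself becomes predicated, so the two
  // disabled lanes still hold the stale 9s instead of 3 and 4.
  Vec4f wrong = predicated_vorr_copy(q0, q1, tail);
  assert(wrong[2] == 9 && wrong[3] == 9);

  // Right lowering: copy the two d-register halves; all lanes preserved.
  Vec4f right = vmovd_pair_copy(q1);
  assert(right[2] == 3 && right[3] == 4);
  return 0;
}

The two assertions mirror the two outputs discussed in the test comment: the predicated copy loses the disabled lanes that the loop's live-out value still needs, while the half-register copy preserves them.
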
diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-5.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-5.ll index 053d6a1..d741411 100644 --- a/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-5.ll +++ b/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-5.ll @@ -94,5 +94,5 @@ attributes #1 = { minsize nofree norecurse nounwind optsize } !llvm.module.flags = !{!0, !1, !2} !0 = !{i32 8, !"branch-target-enforcement", i32 0} -!1 = !{i32 8, !"sign-return-address", i32 1} +!1 = !{i32 8, !"sign-return-address", i32 2} !2 = !{i32 8, !"sign-return-address-all", i32 0} diff --git a/llvm/test/CodeGen/WebAssembly/memory-interleave.ll b/llvm/test/CodeGen/WebAssembly/memory-interleave.ll index 94efe0f..104ec31 100644 --- a/llvm/test/CodeGen/WebAssembly/memory-interleave.ll +++ b/llvm/test/CodeGen/WebAssembly/memory-interleave.ll @@ -5,6 +5,7 @@ target datalayout = "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-n32:64-S128-ni:1:10:20 %struct.TwoInts = type { i32, i32 } %struct.ThreeInts = type { i32, i32, i32 } %struct.FourInts = type { i32, i32, i32, i32 } +%struct.TwoShorts = type { i16, i16 } %struct.ThreeShorts = type { i16, i16, i16 } %struct.FourShorts = type { i16, i16, i16, i16 } %struct.FiveShorts = type { i16, i16, i16, i16, i16 } @@ -12,6 +13,8 @@ target datalayout = "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-n32:64-S128-ni:1:10:20 %struct.ThreeBytes = type { i8, i8, i8 } %struct.FourBytes = type { i8, i8, i8, i8 } %struct.EightBytes = type { i8, i8, i8, i8, i8, i8, i8, i8 } +%struct.TwoFloats = type { float, float } +%struct.FourFloats = type { float, float, float, float } ; CHECK-LABEL: two_ints_same_op: ; CHECK: loop @@ -1536,3 +1539,1608 @@ define hidden void @scale_uv_row_down2_linear(ptr nocapture noundef readonly %0, 34: ; preds = %6, %4 ret void } + +; CHECK-LABEL: two_floats_same_op: +; CHECK-NOT: f32x4.mul +define hidden void @two_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp21.not = icmp eq i32 %N, 0 + br i1 %cmp21.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.022 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.022 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.022 + %1 = load float, ptr %arrayidx1, align 4 + %mul = fmul float %0, %1 + %arrayidx3 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.022 + store float %mul, ptr %arrayidx3, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %mul8 = fmul float %2, %3 + %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4 + store float %mul8, ptr %y10, align 4 + %inc = add nuw i32 %i.022, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_floats_vary_op: +; CHECK-NOT: f32x4 +define hidden void @two_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp20.not = icmp eq i32 %N, 0 + br i1 %cmp20.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body 
+ %i.021 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.021 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.021 + %1 = load float, ptr %arrayidx1, align 4 + %add = fadd float %0, %1 + %arrayidx3 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.021 + store float %add, ptr %arrayidx3, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %sub = fsub float %2, %3 + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4 + store float %sub, ptr %y9, align 4 + %inc = add nuw i32 %i.021, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_bytes_two_floats_same_op: +; CHECK: loop +; CHECK: v128.load64_zero +; CHECK: i8x16.shuffle {{.*}} 0, 2, 4, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: v128.load64_zero +; CHECK: i8x16.shuffle {{.*}} 0, 2, 4, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.mul +; CHECK: i8x16.shuffle {{.*}} 1, 3, 5, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: i8x16.shuffle {{.*}} 1, 3, 5, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.mul +; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 +; CHECK: v128.store +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 +; CHECK: v128.store +define hidden void @two_bytes_two_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp24.not = icmp eq i32 %N, 0 + br i1 %cmp24.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.025 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoBytes, ptr %a, i32 %i.025 + %0 = load i8, ptr %arrayidx, align 1 + %conv = sitofp i8 %0 to float + %arrayidx1 = getelementptr inbounds nuw %struct.TwoBytes, ptr %b, i32 %i.025 + %1 = load i8, ptr %arrayidx1, align 1 + %conv3 = sitofp i8 %1 to float + %mul = fmul float %conv, %conv3 + %arrayidx4 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.025 + store float %mul, ptr %arrayidx4, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 1 + %2 = load i8, ptr %y, align 1 + %conv7 = sitofp i8 %2 to float + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 1 + %3 = load i8, ptr %y9, align 1 + %conv10 = sitofp i8 %3 to float + %mul11 = fmul float %conv7, %conv10 + %y13 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4 + store float %mul11, ptr %y13, align 4 + %inc = add nuw i32 %i.025, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_bytes_two_floats_vary_op: +; CHECK: v128.load64_zero +; CHECK: i8x16.shuffle {{.*}} 0, 2, 4, 
6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: v128.load64_zero +; CHECK: i8x16.shuffle {{.*}} 0, 2, 4, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.add +; CHECK: i8x16.shuffle {{.*}} 1, 3, 5, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: i8x16.shuffle {{.*}} 1, 3, 5, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.sub +; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 +; CHECK: v128.store +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 +; CHECK: v128.store +define hidden void @two_bytes_two_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp23.not = icmp eq i32 %N, 0 + br i1 %cmp23.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.024 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoBytes, ptr %a, i32 %i.024 + %0 = load i8, ptr %arrayidx, align 1 + %conv = sitofp i8 %0 to float + %arrayidx1 = getelementptr inbounds nuw %struct.TwoBytes, ptr %b, i32 %i.024 + %1 = load i8, ptr %arrayidx1, align 1 + %conv3 = sitofp i8 %1 to float + %add = fadd float %conv, %conv3 + %arrayidx4 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.024 + store float %add, ptr %arrayidx4, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 1 + %2 = load i8, ptr %y, align 1 + %conv7 = sitofp i8 %2 to float + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 1 + %3 = load i8, ptr %y9, align 1 + %conv10 = sitofp i8 %3 to float + %sub = fsub float %conv7, %conv10 + %y12 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4 + store float %sub, ptr %y12, align 4 + %inc = add nuw i32 %i.024, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_floats_two_bytes_same_op: +; CHECK: loop +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 +; CHECK: f32x4.mul +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.splat +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 +; CHECK: f32x4.mul +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; 
CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: v128.store64_lane +define hidden void @two_floats_two_bytes_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp22.not = icmp eq i32 %N, 0 + br i1 %cmp22.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.023 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.023 + %1 = load float, ptr %arrayidx1, align 4 + %mul = fmul float %0, %1 + %conv = fptosi float %mul to i8 + %arrayidx3 = getelementptr inbounds nuw %struct.TwoBytes, ptr %res, i32 %i.023 + store i8 %conv, ptr %arrayidx3, align 1 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %mul8 = fmul float %2, %3 + %conv9 = fptosi float %mul8 to i8 + %y11 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 1 + store i8 %conv9, ptr %y11, align 1 + %inc = add nuw i32 %i.023, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_floats_two_bytes_vary_op: +; CHECK: loop +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 +; CHECK: f32x4.add +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.splat +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 +; CHECK: f32x4.sub +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: v128.store64_lane +define hidden void @two_floats_two_bytes_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp21.not = icmp eq i32 %N, 0 + br i1 %cmp21.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.022 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.022 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.022 + %1 = load float, ptr %arrayidx1, align 4 + %add = fadd float %0, %1 + %conv = fptosi float %add to i8 + %arrayidx3 = getelementptr inbounds nuw 
%struct.TwoBytes, ptr %res, i32 %i.022 + store i8 %conv, ptr %arrayidx3, align 1 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %sub = fsub float %2, %3 + %conv8 = fptosi float %sub to i8 + %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 1 + store i8 %conv8, ptr %y10, align 1 + %inc = add nuw i32 %i.022, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_shorts_two_floats_same_op: +; CHECK: loop +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.mul +; CHECK: i8x16.shuffle {{.*}} 2, 3, 6, 7, 10, 11, 14, 15, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: i8x16.shuffle {{.*}} 2, 3, 6, 7, 10, 11, 14, 15, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.mul +; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 +; CHECK: v128.store +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 +; CHECK: v128.store +define hidden void @two_shorts_two_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp24.not = icmp eq i32 %N, 0 + br i1 %cmp24.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.025 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoShorts, ptr %a, i32 %i.025 + %0 = load i16, ptr %arrayidx, align 2 + %conv = sitofp i16 %0 to float + %arrayidx1 = getelementptr inbounds nuw %struct.TwoShorts, ptr %b, i32 %i.025 + %1 = load i16, ptr %arrayidx1, align 2 + %conv3 = sitofp i16 %1 to float + %mul = fmul float %conv, %conv3 + %arrayidx4 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.025 + store float %mul, ptr %arrayidx4, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2 + %2 = load i16, ptr %y, align 2 + %conv7 = sitofp i16 %2 to float + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2 + %3 = load i16, ptr %y9, align 2 + %conv10 = sitofp i16 %3 to float + %mul11 = fmul float %conv7, %conv10 + %y13 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4 + store float %mul11, ptr %y13, align 4 + %inc = add nuw i32 %i.025, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_shorts_two_floats_vary_op: +; CHECK: loop +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.add +; CHECK: i8x16.shuffle {{.*}} 2, 3, 6, 7, 10, 11, 14, 15, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: 
i8x16.shuffle {{.*}} 2, 3, 6, 7, 10, 11, 14, 15, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.sub +; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 +; CHECK: v128.store +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 +; CHECK: v128.store +define hidden void @two_shorts_two_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp23.not = icmp eq i32 %N, 0 + br i1 %cmp23.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.024 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoShorts, ptr %a, i32 %i.024 + %0 = load i16, ptr %arrayidx, align 2 + %conv = sitofp i16 %0 to float + %arrayidx1 = getelementptr inbounds nuw %struct.TwoShorts, ptr %b, i32 %i.024 + %1 = load i16, ptr %arrayidx1, align 2 + %conv3 = sitofp i16 %1 to float + %add = fadd float %conv, %conv3 + %arrayidx4 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.024 + store float %add, ptr %arrayidx4, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2 + %2 = load i16, ptr %y, align 2 + %conv7 = sitofp i16 %2 to float + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2 + %3 = load i16, ptr %y9, align 2 + %conv10 = sitofp i16 %3 to float + %sub = fsub float %conv7, %conv10 + %y12 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4 + store float %sub, ptr %y12, align 4 + %inc = add nuw i32 %i.024, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_floats_two_shorts_same_op: +; CHECK: loop +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 +; CHECK: f32x4.mul +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.splat +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 +; CHECK: f32x4.mul +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: v128.store +define hidden void @two_floats_two_shorts_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp22.not = icmp eq i32 %N, 0 + br i1 %cmp22.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = 
getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.023 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.023 + %1 = load float, ptr %arrayidx1, align 4 + %mul = fmul float %0, %1 + %conv = fptosi float %mul to i16 + %arrayidx3 = getelementptr inbounds nuw %struct.TwoShorts, ptr %res, i32 %i.023 + store i16 %conv, ptr %arrayidx3, align 2 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %mul8 = fmul float %2, %3 + %conv9 = fptosi float %mul8 to i16 + %y11 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2 + store i16 %conv9, ptr %y11, align 2 + %inc = add nuw i32 %i.023, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_floats_two_shorts_vary_op: +; CHECK: loop +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 +; CHECK: f32x4.add +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.splat +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 +; CHECK: f32x4.sub +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: v128.store +define hidden void @two_floats_two_shorts_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp21.not = icmp eq i32 %N, 0 + br i1 %cmp21.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.022 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.022 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.022 + %1 = load float, ptr %arrayidx1, align 4 + %add = fadd float %0, %1 + %conv = fptosi float %add to i16 + %arrayidx3 = getelementptr inbounds nuw %struct.TwoShorts, ptr %res, i32 %i.022 + store i16 %conv, ptr %arrayidx3, align 2 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %sub = fsub float %2, %3 + %conv8 = fptosi float %sub to i16 + %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2 + store i16 %conv8, ptr %y10, align 2 + %inc = add nuw i32 %i.022, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; 
CHECK-LABEL: four_floats_same_op: +; CHECK: loop +; CHECK: v128.load +; CHECK: v128.load +; CHECK: f32x4.mul +; CHECK: v128.store +define hidden void @four_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp45.not = icmp eq i32 %N, 0 + br i1 %cmp45.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.046 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.046 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.046 + %1 = load float, ptr %arrayidx1, align 4 + %mul = fmul float %0, %1 + %arrayidx3 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.046 + store float %mul, ptr %arrayidx3, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %mul8 = fmul float %2, %3 + %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4 + store float %mul8, ptr %y10, align 4 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8 + %4 = load float, ptr %z, align 4 + %z13 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8 + %5 = load float, ptr %z13, align 4 + %mul14 = fmul float %4, %5 + %z16 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 8 + store float %mul14, ptr %z16, align 4 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12 + %6 = load float, ptr %w, align 4 + %w19 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12 + %7 = load float, ptr %w19, align 4 + %mul20 = fmul float %6, %7 + %w22 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 12 + store float %mul20, ptr %w22, align 4 + %inc = add nuw i32 %i.046, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_floats_vary_op: +; CHECK-NOT: f32x4 +define hidden void @four_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp42.not = icmp eq i32 %N, 0 + br i1 %cmp42.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.043 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.043 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.043 + %1 = load float, ptr %arrayidx1, align 4 + %add = fadd float %0, %1 + %arrayidx3 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.043 + store float %add, ptr %arrayidx3, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %sub = fsub float %2, %3 + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4 + store float %sub, ptr %y9, align 4 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8 + %4 = load float, ptr %z, align 4 + %z12 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8 + %5 = load float, ptr %z12, align 4 + %mul = fmul float %4, %5 + %z14 = getelementptr inbounds nuw i8, ptr 
%arrayidx3, i32 8 + store float %mul, ptr %z14, align 4 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12 + %6 = load float, ptr %w, align 4 + %w17 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12 + %7 = load float, ptr %w17, align 4 + %div = fdiv float %6, %7 + %w19 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 12 + store float %div, ptr %w19, align 4 + %inc = add nuw i32 %i.043, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_bytes_four_floats_same_op: +; CHECK: loop +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 4, 8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 4, 8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.mul +; CHECK: i8x16.shuffle {{.*}} 1, 5, 9, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: i8x16.shuffle {{.*}} 1, 5, 9, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.mul +; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 2, 6, 10, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: i8x16.shuffle {{.*}} 2, 6, 10, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.mul +; CHECK: i8x16.shuffle {{.*}} 3, 7, 11, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: i8x16.shuffle {{.*}} 3, 7, 11, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.mul +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.store +; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.store +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.store +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.store +define hidden void @four_bytes_four_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp52.not = icmp eq i32 %N, 0 + br i1 %cmp52.not, label %for.cond.cleanup, label %for.body + 
+for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.053 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourBytes, ptr %a, i32 %i.053 + %0 = load i8, ptr %arrayidx, align 1 + %conv = sitofp i8 %0 to float + %arrayidx1 = getelementptr inbounds nuw %struct.FourBytes, ptr %b, i32 %i.053 + %1 = load i8, ptr %arrayidx1, align 1 + %conv3 = sitofp i8 %1 to float + %mul = fmul float %conv, %conv3 + %arrayidx4 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.053 + store float %mul, ptr %arrayidx4, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 1 + %2 = load i8, ptr %y, align 1 + %conv7 = sitofp i8 %2 to float + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 1 + %3 = load i8, ptr %y9, align 1 + %conv10 = sitofp i8 %3 to float + %mul11 = fmul float %conv7, %conv10 + %y13 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4 + store float %mul11, ptr %y13, align 4 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2 + %4 = load i8, ptr %z, align 1 + %conv15 = sitofp i8 %4 to float + %z17 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2 + %5 = load i8, ptr %z17, align 1 + %conv18 = sitofp i8 %5 to float + %mul19 = fmul float %conv15, %conv18 + %z21 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 8 + store float %mul19, ptr %z21, align 4 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 3 + %6 = load i8, ptr %w, align 1 + %conv23 = sitofp i8 %6 to float + %w25 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 3 + %7 = load i8, ptr %w25, align 1 + %conv26 = sitofp i8 %7 to float + %mul27 = fmul float %conv23, %conv26 + %w29 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 12 + store float %mul27, ptr %w29, align 4 + %inc = add nuw i32 %i.053, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_bytes_four_floats_vary_op: +; CHECK: loop +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 4, 8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 4, 8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.mul +; CHECK: i8x16.shuffle {{.*}} 1, 5, 9, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: i8x16.shuffle {{.*}} 1, 5, 9, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.add +; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 2, 6, 10, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: i8x16.shuffle {{.*}} 2, 6, 10, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.div +; CHECK: i8x16.shuffle {{.*}} 3, 7, 11, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: i8x16.shuffle {{.*}} 3, 7, 11, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK: 
i16x8.extend_low_i8x16_s +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.sub +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.store +; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.store +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.store +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.store +define hidden void @four_bytes_four_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp49.not = icmp eq i32 %N, 0 + br i1 %cmp49.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.050 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourBytes, ptr %a, i32 %i.050 + %0 = load i8, ptr %arrayidx, align 1 + %conv = sitofp i8 %0 to float + %arrayidx1 = getelementptr inbounds nuw %struct.FourBytes, ptr %b, i32 %i.050 + %1 = load i8, ptr %arrayidx1, align 1 + %conv3 = sitofp i8 %1 to float + %mul = fmul float %conv, %conv3 + %arrayidx4 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.050 + store float %mul, ptr %arrayidx4, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 1 + %2 = load i8, ptr %y, align 1 + %conv7 = sitofp i8 %2 to float + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 1 + %3 = load i8, ptr %y9, align 1 + %conv10 = sitofp i8 %3 to float + %add = fadd float %conv7, %conv10 + %y12 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4 + store float %add, ptr %y12, align 4 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2 + %4 = load i8, ptr %z, align 1 + %conv14 = sitofp i8 %4 to float + %z16 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2 + %5 = load i8, ptr %z16, align 1 + %conv17 = sitofp i8 %5 to float + %div = fdiv float %conv14, %conv17 + %z19 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 8 + store float %div, ptr %z19, align 4 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 3 + %6 = load i8, ptr %w, align 1 + %conv21 = sitofp i8 %6 to float + %w23 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 3 + %7 = load i8, ptr %w23, align 1 + %conv24 = sitofp i8 %7 to float + %sub = fsub float %conv21, %conv24 + %w26 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 12 + store float %sub, ptr %w26, align 4 + %inc = add nuw i32 %i.050, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_floats_four_bytes_same_op: +; CHECK: loop +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: v128.load 
+; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: f32x4.mul +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.splat +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: f32x4.mul +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: f32x4.mul +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: f32x4.mul +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: v128.store +define 
hidden void @four_floats_four_bytes_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp48.not = icmp eq i32 %N, 0 + br i1 %cmp48.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.049 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.049 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.049 + %1 = load float, ptr %arrayidx1, align 4 + %mul = fmul float %0, %1 + %conv = fptosi float %mul to i8 + %arrayidx3 = getelementptr inbounds nuw %struct.FourBytes, ptr %res, i32 %i.049 + store i8 %conv, ptr %arrayidx3, align 1 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %mul8 = fmul float %2, %3 + %conv9 = fptosi float %mul8 to i8 + %y11 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 1 + store i8 %conv9, ptr %y11, align 1 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8 + %4 = load float, ptr %z, align 4 + %z14 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8 + %5 = load float, ptr %z14, align 4 + %mul15 = fmul float %4, %5 + %conv16 = fptosi float %mul15 to i8 + %z18 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2 + store i8 %conv16, ptr %z18, align 1 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12 + %6 = load float, ptr %w, align 4 + %w21 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12 + %7 = load float, ptr %w21, align 4 + %mul22 = fmul float %6, %7 + %conv23 = fptosi float %mul22 to i8 + %w25 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 3 + store i8 %conv23, ptr %w25, align 1 + %inc = add nuw i32 %i.049, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_floats_four_bytes_vary_op: +; CHECK: loop +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: f32x4.mul +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.splat +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: f32x4.add +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: i8x16.shuffle {{.*}} 8, 
9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: f32x4.div +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: f32x4.sub +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i8x16.replace_lane +; CHECK: v128.store +define hidden void @four_floats_four_bytes_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp45.not = icmp eq i32 %N, 0 + br i1 %cmp45.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.046 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.046 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.046 + %1 = load float, ptr %arrayidx1, align 4 + %mul = fmul float %0, %1 + %conv = fptosi float %mul to i8 + %arrayidx3 = getelementptr inbounds nuw %struct.FourBytes, ptr %res, i32 %i.046 + store i8 %conv, ptr %arrayidx3, align 1 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %add = fadd float %2, %3 + %conv8 = fptosi float %add to i8 + %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 1 + store i8 %conv8, ptr %y10, align 1 + %z = getelementptr inbounds nuw i8, 
ptr %arrayidx, i32 8 + %4 = load float, ptr %z, align 4 + %z13 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8 + %5 = load float, ptr %z13, align 4 + %div = fdiv float %4, %5 + %conv14 = fptosi float %div to i8 + %z16 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2 + store i8 %conv14, ptr %z16, align 1 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12 + %6 = load float, ptr %w, align 4 + %w19 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12 + %7 = load float, ptr %w19, align 4 + %sub = fsub float %6, %7 + %conv20 = fptosi float %sub to i8 + %w22 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 3 + store i8 %conv20, ptr %w22, align 1 + %inc = add nuw i32 %i.046, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_shorts_four_floats_same_op: +; CHECK: loop +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 8, 9, 16, 17, 24, 25, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 8, 9, 16, 17, 24, 25, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.mul +; CHECK: i8x16.shuffle {{.*}} 2, 3, 10, 11, 18, 19, 26, 27, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: i8x16.shuffle {{.*}} 2, 3, 10, 11, 18, 19, 26, 27, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.mul +; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 4, 5, 12, 13, 20, 21, 28, 29, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: i8x16.shuffle {{.*}} 4, 5, 12, 13, 20, 21, 28, 29, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.mul +; CHECK: i8x16.shuffle {{.*}} 6, 7, 14, 15, 22, 23, 30, 31, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: i8x16.shuffle {{.*}} 6, 7, 14, 15, 22, 23, 30, 31, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.mul +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.store +; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.store +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.store +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.store +define hidden void @four_shorts_four_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp52.not 
= icmp eq i32 %N, 0 + br i1 %cmp52.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.053 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourShorts, ptr %a, i32 %i.053 + %0 = load i16, ptr %arrayidx, align 2 + %conv = sitofp i16 %0 to float + %arrayidx1 = getelementptr inbounds nuw %struct.FourShorts, ptr %b, i32 %i.053 + %1 = load i16, ptr %arrayidx1, align 2 + %conv3 = sitofp i16 %1 to float + %mul = fmul float %conv, %conv3 + %arrayidx4 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.053 + store float %mul, ptr %arrayidx4, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2 + %2 = load i16, ptr %y, align 2 + %conv7 = sitofp i16 %2 to float + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2 + %3 = load i16, ptr %y9, align 2 + %conv10 = sitofp i16 %3 to float + %mul11 = fmul float %conv7, %conv10 + %y13 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4 + store float %mul11, ptr %y13, align 4 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %4 = load i16, ptr %z, align 2 + %conv15 = sitofp i16 %4 to float + %z17 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %5 = load i16, ptr %z17, align 2 + %conv18 = sitofp i16 %5 to float + %mul19 = fmul float %conv15, %conv18 + %z21 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 8 + store float %mul19, ptr %z21, align 4 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 6 + %6 = load i16, ptr %w, align 2 + %conv23 = sitofp i16 %6 to float + %w25 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 6 + %7 = load i16, ptr %w25, align 2 + %conv26 = sitofp i16 %7 to float + %mul27 = fmul float %conv23, %conv26 + %w29 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 12 + store float %mul27, ptr %w29, align 4 + %inc = add nuw i32 %i.053, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_shorts_four_floats_vary_op: +; CHECK: loop +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 8, 9, 16, 17, 24, 25, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 8, 9, 16, 17, 24, 25, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.mul +; CHECK: i8x16.shuffle {{.*}} 2, 3, 10, 11, 18, 19, 26, 27, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: i8x16.shuffle {{.*}} 2, 3, 10, 11, 18, 19, 26, 27, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.add +; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 4, 5, 12, 13, 20, 21, 28, 29, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: i8x16.shuffle {{.*}} 4, 5, 12, 13, 20, 21, 28, 29, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: f32x4.div +; CHECK: i8x16.shuffle {{.*}} 6, 7, 14, 15, 22, 23, 30, 31, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: i8x16.shuffle {{.*}} 6, 7, 14, 15, 22, 23, 30, 31, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK: i32x4.extend_low_i16x8_s +; CHECK: f32x4.convert_i32x4_s +; CHECK: 
f32x4.sub +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.store +; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.store +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.store +define hidden void @four_shorts_four_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp49.not = icmp eq i32 %N, 0 + br i1 %cmp49.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.050 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourShorts, ptr %a, i32 %i.050 + %0 = load i16, ptr %arrayidx, align 2 + %conv = sitofp i16 %0 to float + %arrayidx1 = getelementptr inbounds nuw %struct.FourShorts, ptr %b, i32 %i.050 + %1 = load i16, ptr %arrayidx1, align 2 + %conv3 = sitofp i16 %1 to float + %mul = fmul float %conv, %conv3 + %arrayidx4 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.050 + store float %mul, ptr %arrayidx4, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2 + %2 = load i16, ptr %y, align 2 + %conv7 = sitofp i16 %2 to float + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2 + %3 = load i16, ptr %y9, align 2 + %conv10 = sitofp i16 %3 to float + %add = fadd float %conv7, %conv10 + %y12 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4 + store float %add, ptr %y12, align 4 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %4 = load i16, ptr %z, align 2 + %conv14 = sitofp i16 %4 to float + %z16 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %5 = load i16, ptr %z16, align 2 + %conv17 = sitofp i16 %5 to float + %div = fdiv float %conv14, %conv17 + %z19 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 8 + store float %div, ptr %z19, align 4 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 6 + %6 = load i16, ptr %w, align 2 + %conv21 = sitofp i16 %6 to float + %w23 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 6 + %7 = load i16, ptr %w23, align 2 + %conv24 = sitofp i16 %7 to float + %sub = fsub float %conv21, %conv24 + %w26 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 12 + store float %sub, ptr %w26, align 4 + %inc = add nuw i32 %i.050, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_floats_four_shorts_same_op: +; CHECK: loop +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: v128.load +; CHECK: v128.load +; 
CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: f32x4.mul +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.splat +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: f32x4.mul +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: f32x4.mul +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: f32x4.mul +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: v128.store +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.splat +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: v128.store +define hidden void @four_floats_four_shorts_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp48.not = icmp eq i32 %N, 0 + br i1 %cmp48.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + 
ret void + +for.body: ; preds = %entry, %for.body + %i.049 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.049 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.049 + %1 = load float, ptr %arrayidx1, align 4 + %mul = fmul float %0, %1 + %conv = fptosi float %mul to i16 + %arrayidx3 = getelementptr inbounds nuw %struct.FourShorts, ptr %res, i32 %i.049 + store i16 %conv, ptr %arrayidx3, align 2 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %mul8 = fmul float %2, %3 + %conv9 = fptosi float %mul8 to i16 + %y11 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2 + store i16 %conv9, ptr %y11, align 2 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8 + %4 = load float, ptr %z, align 4 + %z14 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8 + %5 = load float, ptr %z14, align 4 + %mul15 = fmul float %4, %5 + %conv16 = fptosi float %mul15 to i16 + %z18 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4 + store i16 %conv16, ptr %z18, align 2 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12 + %6 = load float, ptr %w, align 4 + %w21 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12 + %7 = load float, ptr %w21, align 4 + %mul22 = fmul float %6, %7 + %conv23 = fptosi float %mul22 to i16 + %w25 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 6 + store i16 %conv23, ptr %w25, align 2 + %inc = add nuw i32 %i.049, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_floats_four_shorts_vary_op: +; CHECK: loop +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: v128.load +; CHECK: v128.load +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: f32x4.mul +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.splat +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: f32x4.add +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle 
{{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: f32x4.div +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31 +; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 +; CHECK: f32x4.sub +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: v128.store +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.splat +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: f32x4.extract_lane +; CHECK: i32.trunc_sat_f32_s +; CHECK: i16x8.replace_lane +; CHECK: v128.store +define hidden void @four_floats_four_shorts_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp45.not = icmp eq i32 %N, 0 + br i1 %cmp45.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.046 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.046 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.046 + %1 = load float, ptr %arrayidx1, align 4 + %mul = fmul float %0, %1 + %conv = fptosi float %mul to i16 + %arrayidx3 = getelementptr inbounds nuw %struct.FourShorts, ptr %res, i32 %i.046 + store i16 %conv, ptr %arrayidx3, align 2 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %add = fadd float %2, %3 + %conv8 = fptosi float %add to i16 + %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2 + store i16 %conv8, ptr %y10, align 2 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8 + %4 = load float, ptr %z, align 4 + %z13 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8 + %5 = load float, ptr %z13, align 4 + %div = fdiv float %4, %5 + %conv14 = fptosi float %div to i16 + %z16 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4 + store i16 %conv14, ptr 
%z16, align 2 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12 + %6 = load float, ptr %w, align 4 + %w19 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12 + %7 = load float, ptr %w19, align 4 + %sub = fsub float %6, %7 + %conv20 = fptosi float %sub to i16 + %w22 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 6 + store i16 %conv20, ptr %w22, align 2 + %inc = add nuw i32 %i.046, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} diff --git a/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmax.ll b/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmax.ll new file mode 100644 index 0000000..45f4ddd --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmax.ll @@ -0,0 +1,60 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 + +; RUN: llc < %s -mtriple=wasm32-unknown-unknown -mattr=+simd128,+relaxed-simd | FileCheck %s + +; Test that fmaxnum and fmaximumnum get transformed to relaxed_max + +target triple = "wasm32" + +define <4 x float> @test_maxnum_f32x4(<4 x float> %a, <4 x float> %b) { +; CHECK-LABEL: test_maxnum_f32x4: +; CHECK: .functype test_maxnum_f32x4 (v128, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: f32x4.relaxed_max +; CHECK-NEXT: # fallthrough-return + %result = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %a, <4 x float> %b) + ret <4 x float> %result +} + +define <4 x float> @test_maximumnum_f32x4(<4 x float> %a, <4 x float> %b) { +; CHECK-LABEL: test_maximumnum_f32x4: +; CHECK: .functype test_maximumnum_f32x4 (v128, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: f32x4.relaxed_max +; CHECK-NEXT: # fallthrough-return + %result = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> %a, <4 x float> %b) + ret <4 x float> %result +} + +define <2 x double> @test_maxnum_f64x2(<2 x double> %a, <2 x double> %b) { +; CHECK-LABEL: test_maxnum_f64x2: +; CHECK: .functype test_maxnum_f64x2 (v128, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: f64x2.relaxed_max +; CHECK-NEXT: # fallthrough-return + %result = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %a, <2 x double> %b) + ret <2 x double> %result +} + +define <2 x double> @test_maximumnum_f64x2(<2 x double> %a, <2 x double> %b) { +; CHECK-LABEL: test_maximumnum_f64x2: +; CHECK: .functype test_maximumnum_f64x2 (v128, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: f64x2.relaxed_max +; CHECK-NEXT: # fallthrough-return + %result = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> %a, <2 x double> %b) + ret <2 x double> %result +} + +declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>) +declare <4 x float> @llvm.maximumnum.v4f32(<4 x float>, <4 x float>) +declare <2 x double> @llvm.maxnum.v2f64(<2 x double>, <2 x double>) +declare <2 x double> @llvm.maximumnum.v2f64(<2 x double>, <2 x double>) diff --git a/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmin.ll b/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmin.ll new file mode 100644 index 0000000..f3eec02 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmin.ll @@ -0,0 +1,59 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc < %s -mtriple=wasm32-unknown-unknown -mattr=+simd128,+relaxed-simd | FileCheck %s + +; Test that 
fminnum and fminimumnum get transformed to relaxed_min + +target triple = "wasm32" + +define <4 x float> @test_minnum_f32x4(<4 x float> %a, <4 x float> %b) { +; CHECK-LABEL: test_minnum_f32x4: +; CHECK: .functype test_minnum_f32x4 (v128, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: f32x4.relaxed_min +; CHECK-NEXT: # fallthrough-return + %result = call <4 x float> @llvm.minnum.v4f32(<4 x float> %a, <4 x float> %b) + ret <4 x float> %result +} + +define <4 x float> @test_minimumnum_f32x4(<4 x float> %a, <4 x float> %b) { +; CHECK-LABEL: test_minimumnum_f32x4: +; CHECK: .functype test_minimumnum_f32x4 (v128, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: f32x4.relaxed_min +; CHECK-NEXT: # fallthrough-return + %result = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> %a, <4 x float> %b) + ret <4 x float> %result +} + +define <2 x double> @test_minnum_f64x2(<2 x double> %a, <2 x double> %b) { +; CHECK-LABEL: test_minnum_f64x2: +; CHECK: .functype test_minnum_f64x2 (v128, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: f64x2.relaxed_min +; CHECK-NEXT: # fallthrough-return + %result = call <2 x double> @llvm.minnum.v2f64(<2 x double> %a, <2 x double> %b) + ret <2 x double> %result +} + +define <2 x double> @test_minimumnum_f64x2(<2 x double> %a, <2 x double> %b) { +; CHECK-LABEL: test_minimumnum_f64x2: +; CHECK: .functype test_minimumnum_f64x2 (v128, v128) -> (v128) +; CHECK-NEXT: # %bb.0: +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 1 +; CHECK-NEXT: f64x2.relaxed_min +; CHECK-NEXT: # fallthrough-return + %result = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %a, <2 x double> %b) + ret <2 x double> %result +} + +declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>) +declare <4 x float> @llvm.minimumnum.v4f32(<4 x float>, <4 x float>) +declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>) +declare <2 x double> @llvm.minimumnum.v2f64(<2 x double>, <2 x double>) diff --git a/llvm/test/CodeGen/WebAssembly/simd-vector-trunc.ll b/llvm/test/CodeGen/WebAssembly/simd-vector-trunc.ll index 123438d..f58456b 100644 --- a/llvm/test/CodeGen/WebAssembly/simd-vector-trunc.ll +++ b/llvm/test/CodeGen/WebAssembly/simd-vector-trunc.ll @@ -94,6 +94,19 @@ entry: ret <16 x i8> %0 } +define <8 x i8> @trunc8i16_8i8(<8 x i16> %a) { +; CHECK-LABEL: trunc8i16_8i8: +; CHECK: .functype trunc8i16_8i8 (v128) -> (v128) +; CHECK-NEXT: # %bb.0: # %entry +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i8x16.shuffle 0, 2, 4, 6, 8, 10, 12, 14, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK-NEXT: # fallthrough-return +entry: + %0 = trunc <8 x i16> %a to <8 x i8> + ret <8 x i8> %0 +} + define <8 x i16> @trunc8i64_8i16(<8 x i64> %a) { ; CHECK-LABEL: trunc8i64_8i16: ; CHECK: .functype trunc8i64_8i16 (v128, v128, v128, v128) -> (v128) @@ -139,3 +152,29 @@ entry: %0 = trunc <8 x i32> %a to <8 x i16> ret <8 x i16> %0 } + +define <4 x i16> @trunc4i32_4i16(<4 x i32> %a) { +; CHECK-LABEL: trunc4i32_4i16: +; CHECK: .functype trunc4i32_4i16 (v128) -> (v128) +; CHECK-NEXT: # %bb.0: # %entry +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i8x16.shuffle 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 0, 1, 0, 1, 0, 1 +; CHECK-NEXT: # fallthrough-return +entry: + %0 = trunc <4 x i32> %a to <4 x i16> + ret <4 x i16> %0 +} + +define <4 x i8> @trunc4i32_4i8(<4 x i32> %a) { +; CHECK-LABEL: trunc4i32_4i8: +; 
CHECK: .functype trunc4i32_4i8 (v128) -> (v128) +; CHECK-NEXT: # %bb.0: # %entry +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: local.get 0 +; CHECK-NEXT: i8x16.shuffle 0, 4, 8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +; CHECK-NEXT: # fallthrough-return +entry: + %0 = trunc <4 x i32> %a to <4 x i8> + ret <4 x i8> %0 +} diff --git a/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll b/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll index bea11e9..940fe8c 100644 --- a/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll +++ b/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll @@ -1,5 +1,5 @@ ; RUN: llc < %s -mtriple=i686-- -mattr=-sse | FileCheck %s -check-prefix=WITHNANS -; RUN: llc < %s -mtriple=i686-- -mattr=-sse -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s -check-prefix=NONANS +; RUN: llc < %s -mtriple=i686-- -mattr=-sse -enable-no-nans-fp-math | FileCheck %s -check-prefix=NONANS ; WITHNANS-LABEL: test: ; WITHNANS: setnp diff --git a/llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll b/llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll index 8411a40..ff7a99a 100644 --- a/llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll +++ b/llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -enable-unsafe-fp-math -mtriple=i686-- | FileCheck %s +; RUN: llc < %s -mtriple=i686-- | FileCheck %s ; rdar://5902801 declare void @test2() diff --git a/llvm/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll b/llvm/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll index 6ebbb2e..0e0e20f 100644 --- a/llvm/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll +++ b/llvm/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -enable-unsafe-fp-math +; RUN: llc < %s ; <rdar://problem/12180135> target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128" target triple = "i386-apple-macosx10.8.0" diff --git a/llvm/test/CodeGen/X86/avx-minmax.ll b/llvm/test/CodeGen/X86/avx-minmax.ll index 6da04c5..8e4b6c6 100644 --- a/llvm/test/CodeGen/X86/avx-minmax.ll +++ b/llvm/test/CodeGen/X86/avx-minmax.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx -enable-no-nans-fp-math | FileCheck %s define <2 x double> @maxpd(<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: maxpd: diff --git a/llvm/test/CodeGen/X86/avx512-unsafe-fp-math.ll b/llvm/test/CodeGen/X86/avx512-unsafe-fp-math.ll index f827998..eb9de8a 100644 --- a/llvm/test/CodeGen/X86/avx512-unsafe-fp-math.ll +++ b/llvm/test/CodeGen/X86/avx512-unsafe-fp-math.ll @@ -2,7 +2,7 @@ ; RUN: llc < %s -mtriple=x86_64 -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mattr=+avx512f | FileCheck %s --check-prefix=CHECK_UNSAFE ; RUN: llc < %s -mtriple=x86_64 -enable-no-nans-fp-math -mattr=+avx512f | FileCheck %s ; RUN: llc < %s -mtriple=x86_64 -enable-no-signed-zeros-fp-math -mattr=+avx512f | FileCheck %s -; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx512f | FileCheck %s +; RUN: llc < %s -mtriple=x86_64 -mattr=+avx512f | FileCheck %s ; RUN: llc < %s -mtriple=x86_64 -mattr=+avx512f | FileCheck %s define <16 x float> @test_max_v16f32(ptr %a_ptr, <16 x float> %b) { diff --git a/llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll 
b/llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll index 5d9784a..1147d79 100644 --- a/llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll +++ b/llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl --enable-unsafe-fp-math | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl | FileCheck %s define dso_local <32 x half> @test1(<32 x half> %acc.coerce, <32 x half> %lhs.coerce, <32 x half> %rhs.coerce) { ; CHECK-LABEL: test1: diff --git a/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll b/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll index b58bae9..1c4d9c6 100644 --- a/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll +++ b/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl --enable-unsafe-fp-math | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl | FileCheck %s define dso_local <32 x half> @test1(<32 x half> %acc.coerce, <32 x half> %lhs.coerce.conj, <32 x half> %rhs.coerce) local_unnamed_addr #0 { ; CHECK-LABEL: test1: diff --git a/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll b/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll index 92bdebb..a8ff969 100644 --- a/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll +++ b/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl --enable-unsafe-fp-math | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl | FileCheck %s define dso_local <32 x half> @test1(<32 x half> %lhs.coerce.conj, <32 x half> %rhs.coerce) local_unnamed_addr #0 { ; CHECK-LABEL: test1: diff --git a/llvm/test/CodeGen/X86/bf16-fast-isel.ll b/llvm/test/CodeGen/X86/bf16-fast-isel.ll new file mode 100644 index 0000000..c659e0e --- /dev/null +++ b/llvm/test/CodeGen/X86/bf16-fast-isel.ll @@ -0,0 +1,66 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --fast-isel < %s -mtriple=x86_64-unknown-unknown | FileCheck %s + +define i8 @test_direct_call(ptr %f) nounwind { +; CHECK-LABEL: test_direct_call: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pushq %rax +; CHECK-NEXT: callq foo@PLT +; CHECK-NEXT: callq bar@PLT +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: retq +entry: + %call = call bfloat @foo(ptr %f) + %call2 = call zeroext i8 @bar(bfloat %call) + ret i8 %call2 +} + +define i8 @test_fast_direct_call(ptr %f) nounwind { +; CHECK-LABEL: test_fast_direct_call: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pushq %rax +; CHECK-NEXT: callq foo_fast@PLT +; CHECK-NEXT: callq bar@PLT +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: retq +entry: + %call = call fastcc bfloat @foo_fast(ptr %f) + %call2 = call zeroext i8 @bar(bfloat %call) + ret i8 %call2 +} + +define i8 @test_indirect_all(ptr %fptr, ptr %f) nounwind { +; CHECK-LABEL: test_indirect_all: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pushq %rbx +; CHECK-NEXT: movq %rdi, %rbx +; CHECK-NEXT: movq %rsi, %rdi +; CHECK-NEXT: callq foo@PLT +; CHECK-NEXT: callq *%rbx +; CHECK-NEXT: popq %rbx +; CHECK-NEXT: retq 
+entry: + %call = call bfloat @foo(ptr %f) + %call2 = call zeroext i8 %fptr(bfloat %call) + ret i8 %call2 +} + +define i8 @test_fast_indirect_all(ptr %fptr, ptr %f) nounwind { +; CHECK-LABEL: test_fast_indirect_all: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pushq %rbx +; CHECK-NEXT: movq %rdi, %rbx +; CHECK-NEXT: movq %rsi, %rdi +; CHECK-NEXT: callq foo@PLT +; CHECK-NEXT: callq *%rbx +; CHECK-NEXT: popq %rbx +; CHECK-NEXT: retq +entry: + %call = call fastcc bfloat @foo(ptr %f) + %call2 = call zeroext i8 %fptr(bfloat %call) + ret i8 %call2 +} + +declare bfloat @foo(ptr %f) +declare zeroext i8 @bar(bfloat) +declare fastcc bfloat @foo_fast(ptr %f) diff --git a/llvm/test/CodeGen/X86/dag-fmf-cse.ll b/llvm/test/CodeGen/X86/dag-fmf-cse.ll index 609ccdc..cdcc082 100644 --- a/llvm/test/CodeGen/X86/dag-fmf-cse.ll +++ b/llvm/test/CodeGen/X86/dag-fmf-cse.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=fma -enable-unsafe-fp-math | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=fma | FileCheck %s ; If fast-math-flags are propagated correctly, the mul1 expression ; should be recognized as a factor in the last fsub, so we should diff --git a/llvm/test/CodeGen/X86/fabs.ll b/llvm/test/CodeGen/X86/fabs.ll index 82c82ac..4e6da83 100644 --- a/llvm/test/CodeGen/X86/fabs.ll +++ b/llvm/test/CodeGen/X86/fabs.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse,-sse2,-sse3 | FileCheck %s --check-prefix=X87 -; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse,-sse2,-sse3 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s --check-prefix=X87UNSAFE +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse,-sse2,-sse3 -enable-no-nans-fp-math | FileCheck %s --check-prefix=X87UNSAFE ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 declare float @fabsf(float) diff --git a/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll b/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll index 0fe107c..aae6cda 100644 --- a/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll +++ b/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll @@ -22,25 +22,24 @@ declare <4 x bfloat> @llvm.maximumnum.v4bf16(<4 x bfloat>, <4 x bfloat>) define float @test_fmaximumnum(float %x, float %y) nounwind { ; SSE2-LABEL: test_fmaximumnum: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa %xmm0, %xmm2 ; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: testl %eax, %eax -; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: js .LBB0_2 -; SSE2-NEXT: # %bb.1: +; SSE2-NEXT: js .LBB0_1 +; SSE2-NEXT: # %bb.2: +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: jmp .LBB0_3 +; SSE2-NEXT: .LBB0_1: +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: .LBB0_3: ; SSE2-NEXT: movdqa %xmm1, %xmm3 -; SSE2-NEXT: .LBB0_2: -; SSE2-NEXT: movdqa %xmm3, %xmm0 -; SSE2-NEXT: cmpordss %xmm3, %xmm0 -; SSE2-NEXT: movaps %xmm0, %xmm4 -; SSE2-NEXT: andps %xmm3, %xmm4 -; SSE2-NEXT: js .LBB0_4 -; SSE2-NEXT: # %bb.3: -; SSE2-NEXT: movdqa %xmm2, %xmm1 -; SSE2-NEXT: .LBB0_4: -; SSE2-NEXT: maxss %xmm1, %xmm3 -; SSE2-NEXT: andnps %xmm3, %xmm0 -; SSE2-NEXT: orps %xmm4, %xmm0 +; SSE2-NEXT: maxss %xmm2, %xmm3 +; SSE2-NEXT: movaps %xmm3, %xmm0 +; SSE2-NEXT: cmpunordss %xmm3, %xmm0 +; SSE2-NEXT: movaps %xmm0, %xmm2 +; SSE2-NEXT: andnps %xmm3, %xmm2 +; SSE2-NEXT: andps %xmm1, %xmm0 +; SSE2-NEXT: orps %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; 
AVX1-LABEL: test_fmaximumnum: @@ -56,7 +55,7 @@ define float @test_fmaximumnum(float %x, float %y) nounwind { ; AVX1-NEXT: vmovdqa %xmm0, %xmm1 ; AVX1-NEXT: .LBB0_3: ; AVX1-NEXT: vmaxss %xmm2, %xmm1, %xmm0 -; AVX1-NEXT: vcmpordss %xmm1, %xmm1, %xmm2 +; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm2 ; AVX1-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; @@ -70,7 +69,7 @@ define float @test_fmaximumnum(float %x, float %y) nounwind { ; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1} ; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1} ; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0 -; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1 +; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1 ; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} ; AVX512-NEXT: retq ; @@ -95,7 +94,7 @@ define float @test_fmaximumnum(float %x, float %y) nounwind { ; X86-NEXT: vmovdqa %xmm2, %xmm0 ; X86-NEXT: .LBB0_3: ; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1 -; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: vmovss %xmm0, (%esp) ; X86-NEXT: flds (%esp) @@ -371,26 +370,25 @@ define float @test_fmaximumnum_nsz(float %x, float %y) "no-signed-zeros-fp-math" ; SSE2-LABEL: test_fmaximumnum_nsz: ; SSE2: # %bb.0: ; SSE2-NEXT: movaps %xmm0, %xmm2 -; SSE2-NEXT: cmpordss %xmm0, %xmm2 -; SSE2-NEXT: movaps %xmm2, %xmm3 -; SSE2-NEXT: andps %xmm0, %xmm3 -; SSE2-NEXT: maxss %xmm1, %xmm0 -; SSE2-NEXT: andnps %xmm0, %xmm2 -; SSE2-NEXT: orps %xmm3, %xmm2 -; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: maxss %xmm1, %xmm2 +; SSE2-NEXT: movaps %xmm2, %xmm1 +; SSE2-NEXT: cmpunordss %xmm2, %xmm1 +; SSE2-NEXT: andps %xmm1, %xmm0 +; SSE2-NEXT: andnps %xmm2, %xmm1 +; SSE2-NEXT: orps %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; AVX1-LABEL: test_fmaximumnum_nsz: ; AVX1: # %bb.0: ; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1 -; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: retq ; ; AVX512-LABEL: test_fmaximumnum_nsz: ; AVX512: # %bb.0: ; AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm1 -; AVX512-NEXT: vcmpordss %xmm0, %xmm0, %k1 +; AVX512-NEXT: vcmpunordss %xmm1, %xmm1, %k1 ; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1} ; AVX512-NEXT: vmovaps %xmm1, %xmm0 ; AVX512-NEXT: retq @@ -404,9 +402,9 @@ define float @test_fmaximumnum_nsz(float %x, float %y) "no-signed-zeros-fp-math" ; X86: # %bb.0: ; X86-NEXT: pushl %eax ; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm1 -; X86-NEXT: vmaxss {{[0-9]+}}(%esp), %xmm0, %xmm2 -; X86-NEXT: vblendvps %xmm1, %xmm0, %xmm2, %xmm0 +; X86-NEXT: vmaxss {{[0-9]+}}(%esp), %xmm0, %xmm1 +; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 +; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: vmovss %xmm0, (%esp) ; X86-NEXT: flds (%esp) ; X86-NEXT: popl %eax @@ -421,23 +419,22 @@ define float @test_fmaximumnum_combine_cmps(float %x, float %y) nounwind { ; SSE2-NEXT: divss %xmm0, %xmm1 ; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: testl %eax, %eax -; SSE2-NEXT: movaps %xmm0, %xmm3 -; SSE2-NEXT: js .LBB9_2 -; SSE2-NEXT: # %bb.1: -; SSE2-NEXT: movaps %xmm1, %xmm3 -; SSE2-NEXT: .LBB9_2: -; SSE2-NEXT: movaps %xmm3, %xmm2 -; SSE2-NEXT: cmpordss %xmm3, %xmm2 -; SSE2-NEXT: movaps %xmm2, %xmm4 -; SSE2-NEXT: andps %xmm3, %xmm4 -; SSE2-NEXT: js .LBB9_4 -; SSE2-NEXT: # %bb.3: +; SSE2-NEXT: js .LBB9_1 +; SSE2-NEXT: # %bb.2: +; SSE2-NEXT: movaps %xmm0, %xmm2 +; SSE2-NEXT: jmp .LBB9_3 +; SSE2-NEXT: .LBB9_1: +; SSE2-NEXT: movaps %xmm1, %xmm2 ; 
SSE2-NEXT: movaps %xmm0, %xmm1 -; SSE2-NEXT: .LBB9_4: -; SSE2-NEXT: maxss %xmm1, %xmm3 +; SSE2-NEXT: .LBB9_3: +; SSE2-NEXT: movaps %xmm1, %xmm3 +; SSE2-NEXT: maxss %xmm2, %xmm3 +; SSE2-NEXT: movaps %xmm3, %xmm0 +; SSE2-NEXT: cmpunordss %xmm3, %xmm0 +; SSE2-NEXT: movaps %xmm0, %xmm2 ; SSE2-NEXT: andnps %xmm3, %xmm2 -; SSE2-NEXT: orps %xmm4, %xmm2 -; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: andps %xmm1, %xmm0 +; SSE2-NEXT: orps %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; AVX1-LABEL: test_fmaximumnum_combine_cmps: @@ -454,7 +451,7 @@ define float @test_fmaximumnum_combine_cmps(float %x, float %y) nounwind { ; AVX1-NEXT: vmovaps %xmm0, %xmm1 ; AVX1-NEXT: .LBB9_3: ; AVX1-NEXT: vmaxss %xmm2, %xmm1, %xmm0 -; AVX1-NEXT: vcmpordss %xmm1, %xmm1, %xmm2 +; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm2 ; AVX1-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; @@ -469,7 +466,7 @@ define float @test_fmaximumnum_combine_cmps(float %x, float %y) nounwind { ; AVX512F-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1} ; AVX512F-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1} ; AVX512F-NEXT: vmaxss %xmm2, %xmm1, %xmm0 -; AVX512F-NEXT: vcmpordss %xmm1, %xmm1, %k1 +; AVX512F-NEXT: vcmpunordss %xmm0, %xmm0, %k1 ; AVX512F-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} ; AVX512F-NEXT: retq ; @@ -507,7 +504,7 @@ define float @test_fmaximumnum_combine_cmps(float %x, float %y) nounwind { ; X86-NEXT: vmovaps %xmm1, %xmm0 ; X86-NEXT: .LBB9_3: ; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1 -; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: vmovss %xmm0, (%esp) ; X86-NEXT: flds (%esp) @@ -527,23 +524,23 @@ define float @test_fminimumnum(float %x, float %y) nounwind { ; SSE2: # %bb.0: ; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: testl %eax, %eax -; SSE2-NEXT: movdqa %xmm1, %xmm3 -; SSE2-NEXT: js .LBB10_2 -; SSE2-NEXT: # %bb.1: -; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: .LBB10_2: -; SSE2-NEXT: movdqa %xmm3, %xmm2 -; SSE2-NEXT: cmpordss %xmm3, %xmm2 -; SSE2-NEXT: movaps %xmm2, %xmm4 -; SSE2-NEXT: andps %xmm3, %xmm4 -; SSE2-NEXT: js .LBB10_4 -; SSE2-NEXT: # %bb.3: +; SSE2-NEXT: js .LBB10_1 +; SSE2-NEXT: # %bb.2: +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: jmp .LBB10_3 +; SSE2-NEXT: .LBB10_1: +; SSE2-NEXT: movdqa %xmm0, %xmm2 ; SSE2-NEXT: movdqa %xmm1, %xmm0 -; SSE2-NEXT: .LBB10_4: -; SSE2-NEXT: minss %xmm0, %xmm3 +; SSE2-NEXT: .LBB10_3: +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: minss %xmm2, %xmm3 +; SSE2-NEXT: movaps %xmm3, %xmm1 +; SSE2-NEXT: cmpunordss %xmm3, %xmm1 +; SSE2-NEXT: movaps %xmm1, %xmm2 ; SSE2-NEXT: andnps %xmm3, %xmm2 -; SSE2-NEXT: orps %xmm4, %xmm2 -; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: andps %xmm0, %xmm1 +; SSE2-NEXT: orps %xmm2, %xmm1 +; SSE2-NEXT: movaps %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; AVX1-LABEL: test_fminimumnum: @@ -559,7 +556,7 @@ define float @test_fminimumnum(float %x, float %y) nounwind { ; AVX1-NEXT: vmovdqa %xmm1, %xmm0 ; AVX1-NEXT: .LBB10_3: ; AVX1-NEXT: vminss %xmm2, %xmm0, %xmm1 -; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: retq ; @@ -573,7 +570,7 @@ define float @test_fminimumnum(float %x, float %y) nounwind { ; AVX512-NEXT: vmovss %xmm0, %xmm2, %xmm2 {%k1} ; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} ; AVX512-NEXT: vminss %xmm2, %xmm0, %xmm1 -; AVX512-NEXT: vcmpordss %xmm0, %xmm0, %k1 +; AVX512-NEXT: vcmpunordss %xmm1, %xmm1, %k1 ; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1} ; AVX512-NEXT: 
vmovaps %xmm1, %xmm0 ; AVX512-NEXT: retq @@ -599,7 +596,7 @@ define float @test_fminimumnum(float %x, float %y) nounwind { ; X86-NEXT: vmovdqa %xmm1, %xmm0 ; X86-NEXT: .LBB10_3: ; X86-NEXT: vminss %xmm2, %xmm0, %xmm1 -; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: vmovss %xmm0, (%esp) ; X86-NEXT: flds (%esp) @@ -857,26 +854,25 @@ define float @test_fminimumnum_nsz(float %x, float %y) nounwind { ; SSE2-LABEL: test_fminimumnum_nsz: ; SSE2: # %bb.0: ; SSE2-NEXT: movaps %xmm0, %xmm2 -; SSE2-NEXT: cmpordss %xmm0, %xmm2 -; SSE2-NEXT: movaps %xmm2, %xmm3 -; SSE2-NEXT: andps %xmm0, %xmm3 -; SSE2-NEXT: minss %xmm1, %xmm0 -; SSE2-NEXT: andnps %xmm0, %xmm2 -; SSE2-NEXT: orps %xmm3, %xmm2 -; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: minss %xmm1, %xmm2 +; SSE2-NEXT: movaps %xmm2, %xmm1 +; SSE2-NEXT: cmpunordss %xmm2, %xmm1 +; SSE2-NEXT: andps %xmm1, %xmm0 +; SSE2-NEXT: andnps %xmm2, %xmm1 +; SSE2-NEXT: orps %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; AVX1-LABEL: test_fminimumnum_nsz: ; AVX1: # %bb.0: ; AVX1-NEXT: vminss %xmm1, %xmm0, %xmm1 -; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: retq ; ; AVX512-LABEL: test_fminimumnum_nsz: ; AVX512: # %bb.0: ; AVX512-NEXT: vminss %xmm1, %xmm0, %xmm1 -; AVX512-NEXT: vcmpordss %xmm0, %xmm0, %k1 +; AVX512-NEXT: vcmpunordss %xmm1, %xmm1, %k1 ; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1} ; AVX512-NEXT: vmovaps %xmm1, %xmm0 ; AVX512-NEXT: retq @@ -890,9 +886,9 @@ define float @test_fminimumnum_nsz(float %x, float %y) nounwind { ; X86: # %bb.0: ; X86-NEXT: pushl %eax ; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm1 -; X86-NEXT: vminss {{[0-9]+}}(%esp), %xmm0, %xmm2 -; X86-NEXT: vblendvps %xmm1, %xmm0, %xmm2, %xmm0 +; X86-NEXT: vminss {{[0-9]+}}(%esp), %xmm0, %xmm1 +; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 +; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: vmovss %xmm0, (%esp) ; X86-NEXT: flds (%esp) ; X86-NEXT: popl %eax @@ -907,23 +903,23 @@ define float @test_fminimumnum_combine_cmps(float %x, float %y) nounwind { ; SSE2-NEXT: divss %xmm0, %xmm1 ; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: testl %eax, %eax -; SSE2-NEXT: movaps %xmm1, %xmm3 -; SSE2-NEXT: js .LBB19_2 -; SSE2-NEXT: # %bb.1: -; SSE2-NEXT: movaps %xmm0, %xmm3 -; SSE2-NEXT: .LBB19_2: -; SSE2-NEXT: movaps %xmm3, %xmm2 -; SSE2-NEXT: cmpordss %xmm3, %xmm2 -; SSE2-NEXT: movaps %xmm2, %xmm4 -; SSE2-NEXT: andps %xmm3, %xmm4 -; SSE2-NEXT: js .LBB19_4 -; SSE2-NEXT: # %bb.3: +; SSE2-NEXT: js .LBB19_1 +; SSE2-NEXT: # %bb.2: +; SSE2-NEXT: movaps %xmm1, %xmm2 +; SSE2-NEXT: jmp .LBB19_3 +; SSE2-NEXT: .LBB19_1: +; SSE2-NEXT: movaps %xmm0, %xmm2 ; SSE2-NEXT: movaps %xmm1, %xmm0 -; SSE2-NEXT: .LBB19_4: -; SSE2-NEXT: minss %xmm0, %xmm3 +; SSE2-NEXT: .LBB19_3: +; SSE2-NEXT: movaps %xmm0, %xmm3 +; SSE2-NEXT: minss %xmm2, %xmm3 +; SSE2-NEXT: movaps %xmm3, %xmm1 +; SSE2-NEXT: cmpunordss %xmm3, %xmm1 +; SSE2-NEXT: movaps %xmm1, %xmm2 ; SSE2-NEXT: andnps %xmm3, %xmm2 -; SSE2-NEXT: orps %xmm4, %xmm2 -; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: andps %xmm0, %xmm1 +; SSE2-NEXT: orps %xmm2, %xmm1 +; SSE2-NEXT: movaps %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; AVX1-LABEL: test_fminimumnum_combine_cmps: @@ -940,7 +936,7 @@ define float @test_fminimumnum_combine_cmps(float %x, float %y) nounwind { ; AVX1-NEXT: vmovaps %xmm2, %xmm0 ; AVX1-NEXT: .LBB19_3: ; AVX1-NEXT: vminss %xmm1, 
%xmm0, %xmm1 -; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: retq ; @@ -955,7 +951,7 @@ define float @test_fminimumnum_combine_cmps(float %x, float %y) nounwind { ; AVX512F-NEXT: vmovss %xmm0, %xmm2, %xmm2 {%k1} ; AVX512F-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} ; AVX512F-NEXT: vminss %xmm2, %xmm0, %xmm1 -; AVX512F-NEXT: vcmpordss %xmm0, %xmm0, %k1 +; AVX512F-NEXT: vcmpunordss %xmm1, %xmm1, %k1 ; AVX512F-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1} ; AVX512F-NEXT: vmovaps %xmm1, %xmm0 ; AVX512F-NEXT: retq @@ -994,7 +990,7 @@ define float @test_fminimumnum_combine_cmps(float %x, float %y) nounwind { ; X86-NEXT: vmovaps %xmm2, %xmm0 ; X86-NEXT: .LBB19_3: ; X86-NEXT: vminss %xmm1, %xmm0, %xmm1 -; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: vmovss %xmm0, (%esp) ; X86-NEXT: flds (%esp) @@ -1022,9 +1018,9 @@ define <2 x double> @test_fminimumnum_vector(<2 x double> %x, <2 x double> %y) { ; SSE2-NEXT: por %xmm4, %xmm3 ; SSE2-NEXT: movdqa %xmm3, %xmm1 ; SSE2-NEXT: minpd %xmm0, %xmm1 -; SSE2-NEXT: movdqa %xmm3, %xmm0 -; SSE2-NEXT: cmpordpd %xmm3, %xmm0 -; SSE2-NEXT: andpd %xmm0, %xmm3 +; SSE2-NEXT: movapd %xmm1, %xmm0 +; SSE2-NEXT: cmpunordpd %xmm1, %xmm0 +; SSE2-NEXT: pand %xmm0, %xmm3 ; SSE2-NEXT: andnpd %xmm1, %xmm0 ; SSE2-NEXT: orpd %xmm3, %xmm0 ; SSE2-NEXT: retq @@ -1034,7 +1030,7 @@ define <2 x double> @test_fminimumnum_vector(<2 x double> %x, <2 x double> %y) { ; AVX-NEXT: vblendvpd %xmm0, %xmm0, %xmm1, %xmm2 ; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vminpd %xmm2, %xmm0, %xmm1 -; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2 +; AVX-NEXT: vcmpunordpd %xmm1, %xmm1, %xmm2 ; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0 ; AVX-NEXT: retq ; @@ -1048,7 +1044,7 @@ define <2 x double> @test_fminimumnum_vector(<2 x double> %x, <2 x double> %y) { ; X86-NEXT: vblendvpd %xmm0, %xmm0, %xmm1, %xmm2 ; X86-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0 ; X86-NEXT: vminpd %xmm2, %xmm0, %xmm1 -; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2 +; X86-NEXT: vcmpunordpd %xmm1, %xmm1, %xmm2 ; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: retl %r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> %y) @@ -1084,19 +1080,17 @@ define <2 x double> @test_fminimumnum_vector_zero(<2 x double> %x) { ; SSE2: # %bb.0: ; SSE2-NEXT: xorpd %xmm1, %xmm1 ; SSE2-NEXT: minpd %xmm0, %xmm1 -; SSE2-NEXT: movapd %xmm0, %xmm2 -; SSE2-NEXT: cmpordpd %xmm0, %xmm2 -; SSE2-NEXT: andpd %xmm2, %xmm0 -; SSE2-NEXT: andnpd %xmm1, %xmm2 -; SSE2-NEXT: orpd %xmm2, %xmm0 +; SSE2-NEXT: movapd %xmm1, %xmm0 +; SSE2-NEXT: cmpunordpd %xmm1, %xmm0 +; SSE2-NEXT: andnpd %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; AVX-LABEL: test_fminimumnum_vector_zero: ; AVX: # %bb.0: ; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm1 -; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2 -; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm1 +; AVX-NEXT: vandnpd %xmm0, %xmm1, %xmm0 ; AVX-NEXT: retq ; ; AVX10_2-LABEL: test_fminimumnum_vector_zero: @@ -1108,9 +1102,9 @@ define <2 x double> @test_fminimumnum_vector_zero(<2 x double> %x) { ; X86-LABEL: test_fminimumnum_vector_zero: ; X86: # %bb.0: ; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1 -; X86-NEXT: vminpd %xmm0, %xmm1, %xmm1 -; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2 -; X86-NEXT: vblendvpd %xmm2, %xmm0, 
%xmm1, %xmm0 +; X86-NEXT: vminpd %xmm0, %xmm1, %xmm0 +; X86-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm1 +; X86-NEXT: vandnpd %xmm0, %xmm1, %xmm0 ; X86-NEXT: retl %r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double 0., double 0.>) ret <2 x double> %r @@ -1120,20 +1114,21 @@ define <4 x float> @test_fmaximumnum_vector_signed_zero(<4 x float> %x) { ; SSE2-LABEL: test_fmaximumnum_vector_signed_zero: ; SSE2: # %bb.0: ; SSE2-NEXT: movaps {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] -; SSE2-NEXT: maxps %xmm0, %xmm1 -; SSE2-NEXT: movaps %xmm0, %xmm2 -; SSE2-NEXT: cmpordps %xmm0, %xmm2 -; SSE2-NEXT: andps %xmm2, %xmm0 -; SSE2-NEXT: andnps %xmm1, %xmm2 -; SSE2-NEXT: orps %xmm2, %xmm0 +; SSE2-NEXT: movaps %xmm1, %xmm2 +; SSE2-NEXT: maxps %xmm0, %xmm2 +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: cmpunordps %xmm2, %xmm0 +; SSE2-NEXT: andps %xmm0, %xmm1 +; SSE2-NEXT: andnps %xmm2, %xmm0 +; SSE2-NEXT: orps %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; AVX-LABEL: test_fmaximumnum_vector_signed_zero: ; AVX: # %bb.0: ; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] -; AVX-NEXT: vmaxps %xmm0, %xmm1, %xmm1 -; AVX-NEXT: vcmpordps %xmm0, %xmm0, %xmm2 -; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmaxps %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vcmpunordps %xmm0, %xmm0, %xmm2 +; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; AVX10_2-LABEL: test_fmaximumnum_vector_signed_zero: @@ -1144,9 +1139,9 @@ define <4 x float> @test_fmaximumnum_vector_signed_zero(<4 x float> %x) { ; X86-LABEL: test_fmaximumnum_vector_signed_zero: ; X86: # %bb.0: ; X86-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] -; X86-NEXT: vmaxps %xmm0, %xmm1, %xmm1 -; X86-NEXT: vcmpordps %xmm0, %xmm0, %xmm2 -; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 +; X86-NEXT: vmaxps %xmm0, %xmm1, %xmm0 +; X86-NEXT: vcmpunordps %xmm0, %xmm0, %xmm2 +; X86-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 ; X86-NEXT: retl %r = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> %x, <4 x float> <float -0., float -0., float -0., float -0.>) ret <4 x float> %r @@ -1155,13 +1150,14 @@ define <4 x float> @test_fmaximumnum_vector_signed_zero(<4 x float> %x) { define <2 x double> @test_fminimumnum_vector_partially_zero(<2 x double> %x) { ; SSE2-LABEL: test_fminimumnum_vector_partially_zero: ; SSE2: # %bb.0: -; SSE2-NEXT: movapd %xmm0, %xmm1 -; SSE2-NEXT: cmpordpd %xmm0, %xmm1 -; SSE2-NEXT: xorpd %xmm2, %xmm2 -; SSE2-NEXT: movhpd {{.*#+}} xmm2 = xmm2[0],mem[0] +; SSE2-NEXT: xorpd %xmm1, %xmm1 +; SSE2-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; SSE2-NEXT: movapd %xmm1, %xmm2 ; SSE2-NEXT: minpd %xmm0, %xmm2 -; SSE2-NEXT: andpd %xmm1, %xmm0 -; SSE2-NEXT: andnpd %xmm2, %xmm1 +; SSE2-NEXT: movapd %xmm2, %xmm0 +; SSE2-NEXT: cmpunordpd %xmm2, %xmm0 +; SSE2-NEXT: andpd %xmm0, %xmm1 +; SSE2-NEXT: andnpd %xmm2, %xmm0 ; SSE2-NEXT: orpd %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -1169,9 +1165,9 @@ define <2 x double> @test_fminimumnum_vector_partially_zero(<2 x double> %x) { ; AVX: # %bb.0: ; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] -; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm1 -; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2 -; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm2 +; AVX-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; AVX10_2-LABEL: test_fminimumnum_vector_partially_zero: @@ -1185,9 +1181,9 @@ define <2 x double> 
@test_fminimumnum_vector_partially_zero(<2 x double> %x) { ; X86: # %bb.0: ; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1 ; X86-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] -; X86-NEXT: vminpd %xmm0, %xmm1, %xmm1 -; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2 -; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0 +; X86-NEXT: vminpd %xmm0, %xmm1, %xmm0 +; X86-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm2 +; X86-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 ; X86-NEXT: retl %r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double 0., double 5.>) ret <2 x double> %r @@ -1212,9 +1208,9 @@ define <2 x double> @test_fminimumnum_vector_different_zeros(<2 x double> %x) { ; SSE2-NEXT: por %xmm2, %xmm3 ; SSE2-NEXT: movdqa %xmm3, %xmm1 ; SSE2-NEXT: minpd %xmm4, %xmm1 -; SSE2-NEXT: movdqa %xmm3, %xmm0 -; SSE2-NEXT: cmpordpd %xmm3, %xmm0 -; SSE2-NEXT: andpd %xmm0, %xmm3 +; SSE2-NEXT: movapd %xmm1, %xmm0 +; SSE2-NEXT: cmpunordpd %xmm1, %xmm0 +; SSE2-NEXT: pand %xmm0, %xmm3 ; SSE2-NEXT: andnpd %xmm1, %xmm0 ; SSE2-NEXT: orpd %xmm3, %xmm0 ; SSE2-NEXT: retq @@ -1226,7 +1222,7 @@ define <2 x double> @test_fminimumnum_vector_different_zeros(<2 x double> %x) { ; AVX-NEXT: vblendvpd %xmm0, %xmm0, %xmm1, %xmm2 ; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vminpd %xmm2, %xmm0, %xmm1 -; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2 +; AVX-NEXT: vcmpunordpd %xmm1, %xmm1, %xmm2 ; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0 ; AVX-NEXT: retq ; @@ -1244,7 +1240,7 @@ define <2 x double> @test_fminimumnum_vector_different_zeros(<2 x double> %x) { ; X86-NEXT: vblendvpd %xmm0, %xmm0, %xmm1, %xmm2 ; X86-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0 ; X86-NEXT: vminpd %xmm2, %xmm0, %xmm1 -; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2 +; X86-NEXT: vcmpunordpd %xmm1, %xmm1, %xmm2 ; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: retl %r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double 0., double -0.>) @@ -1278,20 +1274,24 @@ define <4 x float> @test_fmaximumnum_vector_non_zero(<4 x float> %x) { define <2 x double> @test_fminimumnum_vector_nan(<2 x double> %x) { ; SSE2-LABEL: test_fminimumnum_vector_nan: ; SSE2: # %bb.0: -; SSE2-NEXT: xorpd %xmm2, %xmm2 ; SSE2-NEXT: xorpd %xmm1, %xmm1 ; SSE2-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0] -; SSE2-NEXT: minpd %xmm0, %xmm1 -; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] -; SSE2-NEXT: movapd %xmm1, %xmm0 +; SSE2-NEXT: movapd %xmm1, %xmm2 +; SSE2-NEXT: minpd %xmm0, %xmm2 +; SSE2-NEXT: movapd %xmm2, %xmm0 +; SSE2-NEXT: cmpunordpd %xmm2, %xmm0 +; SSE2-NEXT: andpd %xmm0, %xmm1 +; SSE2-NEXT: andnpd %xmm2, %xmm0 +; SSE2-NEXT: orpd %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; AVX-LABEL: test_fminimumnum_vector_nan: ; AVX: # %bb.0: ; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vmovhpd {{.*#+}} xmm2 = xmm1[0],mem[0] -; AVX-NEXT: vminpd %xmm0, %xmm2, %xmm0 -; AVX-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] +; AVX-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] +; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm2 +; AVX-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; AVX10_2-LABEL: test_fminimumnum_vector_nan: @@ -1306,7 +1306,7 @@ define <2 x double> @test_fminimumnum_vector_nan(<2 x double> %x) { ; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1 ; X86-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] ; X86-NEXT: vminpd %xmm0, %xmm1, %xmm0 -; X86-NEXT: vcmpordpd %xmm1, %xmm1, %xmm2 +; X86-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm2 ; X86-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 ; X86-NEXT: retl %r = call <2 x double> 
@llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double 0., double 0x7fff000000000000>) @@ -1318,19 +1318,17 @@ define <2 x double> @test_fminimumnum_vector_zero_first(<2 x double> %x) { ; SSE2: # %bb.0: ; SSE2-NEXT: xorpd %xmm1, %xmm1 ; SSE2-NEXT: minpd %xmm0, %xmm1 -; SSE2-NEXT: movapd %xmm0, %xmm2 -; SSE2-NEXT: cmpordpd %xmm0, %xmm2 -; SSE2-NEXT: andpd %xmm2, %xmm0 -; SSE2-NEXT: andnpd %xmm1, %xmm2 -; SSE2-NEXT: orpd %xmm2, %xmm0 +; SSE2-NEXT: movapd %xmm1, %xmm0 +; SSE2-NEXT: cmpunordpd %xmm1, %xmm0 +; SSE2-NEXT: andnpd %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; AVX-LABEL: test_fminimumnum_vector_zero_first: ; AVX: # %bb.0: ; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm1 -; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2 -; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm1 +; AVX-NEXT: vandnpd %xmm0, %xmm1, %xmm0 ; AVX-NEXT: retq ; ; AVX10_2-LABEL: test_fminimumnum_vector_zero_first: @@ -1342,9 +1340,9 @@ define <2 x double> @test_fminimumnum_vector_zero_first(<2 x double> %x) { ; X86-LABEL: test_fminimumnum_vector_zero_first: ; X86: # %bb.0: ; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1 -; X86-NEXT: vminpd %xmm0, %xmm1, %xmm1 -; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2 -; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0 +; X86-NEXT: vminpd %xmm0, %xmm1, %xmm0 +; X86-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm1 +; X86-NEXT: vandnpd %xmm0, %xmm1, %xmm0 ; X86-NEXT: retl %r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> <double 0., double 0.>, <2 x double> %x) ret <2 x double> %r @@ -1378,20 +1376,21 @@ define <4 x float> @test_fmaximumnum_vector_signed_zero_first(<4 x float> %x) { ; SSE2-LABEL: test_fmaximumnum_vector_signed_zero_first: ; SSE2: # %bb.0: ; SSE2-NEXT: movaps {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] -; SSE2-NEXT: maxps %xmm0, %xmm1 -; SSE2-NEXT: movaps %xmm0, %xmm2 -; SSE2-NEXT: cmpordps %xmm0, %xmm2 -; SSE2-NEXT: andps %xmm2, %xmm0 -; SSE2-NEXT: andnps %xmm1, %xmm2 -; SSE2-NEXT: orps %xmm2, %xmm0 +; SSE2-NEXT: movaps %xmm1, %xmm2 +; SSE2-NEXT: maxps %xmm0, %xmm2 +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: cmpunordps %xmm2, %xmm0 +; SSE2-NEXT: andps %xmm0, %xmm1 +; SSE2-NEXT: andnps %xmm2, %xmm0 +; SSE2-NEXT: orps %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; AVX-LABEL: test_fmaximumnum_vector_signed_zero_first: ; AVX: # %bb.0: ; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] -; AVX-NEXT: vmaxps %xmm0, %xmm1, %xmm1 -; AVX-NEXT: vcmpordps %xmm0, %xmm0, %xmm2 -; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmaxps %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vcmpunordps %xmm0, %xmm0, %xmm2 +; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; AVX10_2-LABEL: test_fmaximumnum_vector_signed_zero_first: @@ -1402,9 +1401,9 @@ define <4 x float> @test_fmaximumnum_vector_signed_zero_first(<4 x float> %x) { ; X86-LABEL: test_fmaximumnum_vector_signed_zero_first: ; X86: # %bb.0: ; X86-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] -; X86-NEXT: vmaxps %xmm0, %xmm1, %xmm1 -; X86-NEXT: vcmpordps %xmm0, %xmm0, %xmm2 -; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 +; X86-NEXT: vmaxps %xmm0, %xmm1, %xmm0 +; X86-NEXT: vcmpunordps %xmm0, %xmm0, %xmm2 +; X86-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 ; X86-NEXT: retl %r = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> <float -0., float -0., float -0., float -0.>, <4 x float> %x) ret <4 x float> %r @@ -1455,11 +1454,11 @@ define <4 x float> @test_fmaximumnum_v4f32_splat(<4 x 
float> %x, float %y) { ; SSE2-NEXT: por %xmm2, %xmm0 ; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: maxps %xmm4, %xmm1 -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: cmpordps %xmm0, %xmm2 -; SSE2-NEXT: andps %xmm2, %xmm0 +; SSE2-NEXT: movaps %xmm1, %xmm2 +; SSE2-NEXT: cmpunordps %xmm1, %xmm2 +; SSE2-NEXT: pand %xmm2, %xmm0 ; SSE2-NEXT: andnps %xmm1, %xmm2 -; SSE2-NEXT: orps %xmm2, %xmm0 +; SSE2-NEXT: por %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; AVX1-LABEL: test_fmaximumnum_v4f32_splat: @@ -1468,7 +1467,7 @@ define <4 x float> @test_fmaximumnum_v4f32_splat(<4 x float> %x, float %y) { ; AVX1-NEXT: vblendvps %xmm0, %xmm1, %xmm0, %xmm2 ; AVX1-NEXT: vblendvps %xmm0, %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vmaxps %xmm2, %xmm0, %xmm1 -; AVX1-NEXT: vcmpordps %xmm0, %xmm0, %xmm2 +; AVX1-NEXT: vcmpunordps %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: retq ; @@ -1478,7 +1477,7 @@ define <4 x float> @test_fmaximumnum_v4f32_splat(<4 x float> %x, float %y) { ; AVX512-NEXT: vblendvps %xmm0, %xmm1, %xmm0, %xmm2 ; AVX512-NEXT: vblendvps %xmm0, %xmm0, %xmm1, %xmm0 ; AVX512-NEXT: vmaxps %xmm2, %xmm0, %xmm1 -; AVX512-NEXT: vcmpordps %xmm0, %xmm0, %xmm2 +; AVX512-NEXT: vcmpunordps %xmm1, %xmm1, %xmm2 ; AVX512-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; AVX512-NEXT: retq ; @@ -1494,7 +1493,7 @@ define <4 x float> @test_fmaximumnum_v4f32_splat(<4 x float> %x, float %y) { ; X86-NEXT: vblendvps %xmm0, %xmm1, %xmm0, %xmm2 ; X86-NEXT: vblendvps %xmm0, %xmm0, %xmm1, %xmm0 ; X86-NEXT: vmaxps %xmm2, %xmm0, %xmm1 -; X86-NEXT: vcmpordps %xmm0, %xmm0, %xmm2 +; X86-NEXT: vcmpunordps %xmm1, %xmm1, %xmm2 ; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: retl %splatinsert = insertelement <4 x float> poison, float %y, i64 0 @@ -1506,134 +1505,130 @@ define <4 x float> @test_fmaximumnum_v4f32_splat(<4 x float> %x, float %y) { define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind { ; SSE2-LABEL: test_fmaximumnum_v4f16: ; SSE2: # %bb.0: -; SSE2-NEXT: subq $104, %rsp -; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE2-NEXT: subq $136, %rsp +; SSE2-NEXT: movaps %xmm0, %xmm2 +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[1,1] +; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE2-NEXT: movaps %xmm1, %xmm2 +; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1] +; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; SSE2-NEXT: movss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE2-NEXT: psrld $16, %xmm0 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill +; SSE2-NEXT: movaps %xmm1, %xmm0 ; SSE2-NEXT: psrld $16, %xmm0 ; SSE2-NEXT: callq __extendhfsf2@PLT -; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill +; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE2-NEXT: callq __extendhfsf2@PLT -; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 4-byte Folded Reload -; SSE2-NEXT: # xmm4 = mem[0],zero,zero,zero -; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: testl %eax, %eax -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: js .LBB33_2 -; SSE2-NEXT: # %bb.1: -; SSE2-NEXT: movdqa %xmm4, 
%xmm2 -; SSE2-NEXT: .LBB33_2: -; SSE2-NEXT: movdqa %xmm2, %xmm0 -; SSE2-NEXT: cmpordss %xmm2, %xmm0 -; SSE2-NEXT: movaps %xmm0, %xmm3 -; SSE2-NEXT: andps %xmm2, %xmm3 -; SSE2-NEXT: js .LBB33_4 -; SSE2-NEXT: # %bb.3: -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: .LBB33_4: -; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload -; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1] -; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE2-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload -; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1] -; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE2-NEXT: maxss %xmm4, %xmm2 -; SSE2-NEXT: andnps %xmm2, %xmm0 -; SSE2-NEXT: orps %xmm3, %xmm0 +; SSE2-NEXT: js .LBB33_1 +; SSE2-NEXT: # %bb.2: +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload +; SSE2-NEXT: jmp .LBB33_3 +; SSE2-NEXT: .LBB33_1: +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: .LBB33_3: +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: psrlq $48, %xmm0 +; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; SSE2-NEXT: psrlq $48, %xmm0 +; SSE2-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm3, %xmm2 +; SSE2-NEXT: maxss %xmm1, %xmm2 +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: cmpunordss %xmm2, %xmm0 +; SSE2-NEXT: movaps %xmm0, %xmm1 +; SSE2-NEXT: andnps %xmm2, %xmm1 +; SSE2-NEXT: andps %xmm3, %xmm0 +; SSE2-NEXT: orps %xmm1, %xmm0 ; SSE2-NEXT: callq __truncsfhf2@PLT ; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload +; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero ; SSE2-NEXT: callq __extendhfsf2@PLT -; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill -; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload +; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero ; SSE2-NEXT: callq __extendhfsf2@PLT -; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 4-byte Folded Reload -; SSE2-NEXT: # xmm4 = mem[0],zero,zero,zero -; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: testl %eax, %eax -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: js .LBB33_6 +; SSE2-NEXT: js .LBB33_4 ; SSE2-NEXT: # %bb.5: -; SSE2-NEXT: movdqa %xmm4, %xmm2 -; SSE2-NEXT: .LBB33_6: -; SSE2-NEXT: movdqa %xmm2, %xmm0 -; SSE2-NEXT: cmpordss %xmm2, %xmm0 -; SSE2-NEXT: movaps %xmm0, %xmm3 -; SSE2-NEXT: andps %xmm2, %xmm3 -; SSE2-NEXT: js .LBB33_8 -; SSE2-NEXT: # %bb.7: -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: .LBB33_8: +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload +; SSE2-NEXT: jmp .LBB33_6 +; SSE2-NEXT: .LBB33_4: ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload -; SSE2-NEXT: psrlq $48, %xmm1 -; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE2-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload -; SSE2-NEXT: psrlq $48, %xmm1 -; SSE2-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill -; SSE2-NEXT: maxss %xmm4, %xmm2 -; SSE2-NEXT: andnps %xmm2, %xmm0 -; SSE2-NEXT: orps %xmm3, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: .LBB33_6: +; 
SSE2-NEXT: movdqa %xmm3, %xmm2 +; SSE2-NEXT: maxss %xmm1, %xmm2 +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: cmpunordss %xmm2, %xmm0 +; SSE2-NEXT: movaps %xmm0, %xmm1 +; SSE2-NEXT: andnps %xmm2, %xmm1 +; SSE2-NEXT: andps %xmm3, %xmm0 +; SSE2-NEXT: orps %xmm1, %xmm0 ; SSE2-NEXT: callq __truncsfhf2@PLT ; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE2-NEXT: callq __extendhfsf2@PLT -; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE2-NEXT: callq __extendhfsf2@PLT -; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 4-byte Folded Reload -; SSE2-NEXT: # xmm4 = mem[0],zero,zero,zero ; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: testl %eax, %eax -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: js .LBB33_10 -; SSE2-NEXT: # %bb.9: -; SSE2-NEXT: movdqa %xmm4, %xmm2 -; SSE2-NEXT: .LBB33_10: -; SSE2-NEXT: movdqa %xmm2, %xmm1 -; SSE2-NEXT: cmpordss %xmm2, %xmm1 -; SSE2-NEXT: movaps %xmm1, %xmm3 -; SSE2-NEXT: andps %xmm2, %xmm3 -; SSE2-NEXT: js .LBB33_12 -; SSE2-NEXT: # %bb.11: -; SSE2-NEXT: movdqa %xmm0, %xmm4 -; SSE2-NEXT: .LBB33_12: -; SSE2-NEXT: maxss %xmm4, %xmm2 +; SSE2-NEXT: js .LBB33_7 +; SSE2-NEXT: # %bb.8: +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload +; SSE2-NEXT: jmp .LBB33_9 +; SSE2-NEXT: .LBB33_7: +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: .LBB33_9: +; SSE2-NEXT: movdqa %xmm3, %xmm2 +; SSE2-NEXT: maxss %xmm1, %xmm2 +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: cmpunordss %xmm2, %xmm0 +; SSE2-NEXT: movaps %xmm0, %xmm1 ; SSE2-NEXT: andnps %xmm2, %xmm1 -; SSE2-NEXT: orps %xmm3, %xmm1 -; SSE2-NEXT: movaps %xmm1, %xmm0 +; SSE2-NEXT: andps %xmm3, %xmm0 +; SSE2-NEXT: orps %xmm1, %xmm0 ; SSE2-NEXT: callq __truncsfhf2@PLT ; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; SSE2-NEXT: callq __extendhfsf2@PLT -; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill +; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE2-NEXT: callq __extendhfsf2@PLT -; SSE2-NEXT: movd (%rsp), %xmm4 # 4-byte Folded Reload -; SSE2-NEXT: # xmm4 = mem[0],zero,zero,zero -; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: testl %eax, %eax -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: js .LBB33_14 -; SSE2-NEXT: # %bb.13: -; SSE2-NEXT: movdqa %xmm4, %xmm2 -; SSE2-NEXT: .LBB33_14: -; SSE2-NEXT: movdqa %xmm2, %xmm0 -; SSE2-NEXT: cmpordss %xmm2, %xmm0 -; SSE2-NEXT: movaps %xmm0, %xmm3 -; SSE2-NEXT: andps %xmm2, %xmm3 -; SSE2-NEXT: js .LBB33_16 -; SSE2-NEXT: # %bb.15: -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: .LBB33_16: -; SSE2-NEXT: maxss %xmm4, %xmm2 -; SSE2-NEXT: andnps %xmm2, %xmm0 -; SSE2-NEXT: orps %xmm3, %xmm0 +; SSE2-NEXT: js .LBB33_10 +; SSE2-NEXT: # %bb.11: +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: movdqa (%rsp), %xmm3 # 16-byte Reload +; SSE2-NEXT: jmp .LBB33_12 +; SSE2-NEXT: .LBB33_10: +; SSE2-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: .LBB33_12: +; SSE2-NEXT: movdqa %xmm3, %xmm2 +; SSE2-NEXT: maxss %xmm1, %xmm2 +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: cmpunordss %xmm2, %xmm0 +; SSE2-NEXT: movaps %xmm0, %xmm1 +; 
SSE2-NEXT: andnps %xmm2, %xmm1 +; SSE2-NEXT: andps %xmm3, %xmm0 +; SSE2-NEXT: orps %xmm1, %xmm0 ; SSE2-NEXT: callq __truncsfhf2@PLT ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] @@ -1641,7 +1636,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; SSE2-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; SSE2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; SSE2-NEXT: addq $104, %rsp +; SSE2-NEXT: addq $136, %rsp ; SSE2-NEXT: retq ; ; AVX1-LABEL: test_fmaximumnum_v4f16: @@ -1679,7 +1674,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; AVX1-NEXT: vpsrlq $48, %xmm0, %xmm0 ; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0 -; AVX1-NEXT: vcmpordss %xmm2, %xmm2, %xmm1 +; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm1 ; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: callq __truncsfhf2@PLT ; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -1700,7 +1695,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; AVX1-NEXT: vmovdqa %xmm0, %xmm2 ; AVX1-NEXT: .LBB33_6: ; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0 -; AVX1-NEXT: vcmpordss %xmm2, %xmm2, %xmm1 +; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm1 ; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: callq __truncsfhf2@PLT ; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -1721,7 +1716,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; AVX1-NEXT: vmovdqa %xmm0, %xmm2 ; AVX1-NEXT: .LBB33_9: ; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0 -; AVX1-NEXT: vcmpordss %xmm2, %xmm2, %xmm1 +; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm1 ; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: callq __truncsfhf2@PLT ; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill @@ -1742,7 +1737,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; AVX1-NEXT: vmovdqa %xmm0, %xmm2 ; AVX1-NEXT: .LBB33_12: ; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0 -; AVX1-NEXT: vcmpordss %xmm2, %xmm2, %xmm1 +; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm1 ; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: callq __truncsfhf2@PLT ; AVX1-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload @@ -1768,7 +1763,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1} ; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1} ; AVX512-NEXT: vmaxss %xmm4, %xmm3, %xmm2 -; AVX512-NEXT: vcmpordss %xmm3, %xmm3, %k1 +; AVX512-NEXT: vcmpunordss %xmm2, %xmm2, %k1 ; AVX512-NEXT: vmovss %xmm3, %xmm2, %xmm2 {%k1} ; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2 ; AVX512-NEXT: vshufps {{.*#+}} xmm3 = xmm0[3,3,3,3] @@ -1783,7 +1778,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} ; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1} ; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3 -; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1 +; AVX512-NEXT: vcmpunordss %xmm3, %xmm3, %k1 ; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1} ; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3 ; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] 
@@ -1799,7 +1794,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} ; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1} ; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3 -; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1 +; AVX512-NEXT: vcmpunordss %xmm3, %xmm3, %k1 ; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1} ; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3 ; AVX512-NEXT: vshufpd {{.*#+}} xmm4 = xmm0[1,0] @@ -1814,7 +1809,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1} ; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} ; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4 -; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1 +; AVX512-NEXT: vcmpunordss %xmm4, %xmm4, %k1 ; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1} ; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4 ; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] @@ -1831,7 +1826,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} ; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1} ; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3 -; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1 +; AVX512-NEXT: vcmpunordss %xmm3, %xmm3, %k1 ; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1} ; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3 ; AVX512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3] @@ -1846,7 +1841,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1} ; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} ; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4 -; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1 +; AVX512-NEXT: vcmpunordss %xmm4, %xmm4, %k1 ; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1} ; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4 ; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] @@ -1860,7 +1855,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1} ; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} ; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4 -; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1 +; AVX512-NEXT: vcmpunordss %xmm4, %xmm4, %k1 ; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1} ; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0 @@ -1875,7 +1870,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; AVX512-NEXT: vmovss %xmm1, %xmm5, %xmm5 {%k1} ; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1} ; AVX512-NEXT: vmaxss %xmm5, %xmm1, %xmm0 -; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1 +; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1 ; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} ; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 ; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3] @@ -1933,7 +1928,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; X86-NEXT: vmovdqa %xmm1, %xmm0 ; X86-NEXT: .LBB33_3: ; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1 -; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill ; X86-NEXT: calll __extendhfsf2 @@ -1955,7 +1950,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; X86-NEXT: vmovdqa %xmm1, 
%xmm0 ; X86-NEXT: .LBB33_6: ; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1 -; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill ; X86-NEXT: calll __truncsfhf2 @@ -1993,7 +1988,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; X86-NEXT: vmovdqa %xmm1, %xmm0 ; X86-NEXT: .LBB33_9: ; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1 -; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill ; X86-NEXT: calll __extendhfsf2 @@ -2015,7 +2010,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; X86-NEXT: vmovdqa %xmm1, %xmm0 ; X86-NEXT: .LBB33_12: ; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1 -; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill ; X86-NEXT: calll __truncsfhf2 @@ -2041,120 +2036,114 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n ; SSE2-NEXT: pushq %rbp ; SSE2-NEXT: pushq %r15 ; SSE2-NEXT: pushq %r14 +; SSE2-NEXT: pushq %r13 +; SSE2-NEXT: pushq %r12 ; SSE2-NEXT: pushq %rbx ; SSE2-NEXT: subq $56, %rsp -; SSE2-NEXT: pextrw $0, %xmm1, %r14d -; SSE2-NEXT: pextrw $0, %xmm0, %r15d -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: psrld $16, %xmm2 -; SSE2-NEXT: pextrw $0, %xmm2, %eax ; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: psrld $16, %xmm2 -; SSE2-NEXT: pextrw $0, %xmm2, %ecx +; SSE2-NEXT: psrlq $48, %xmm2 +; SSE2-NEXT: movdqa %xmm1, %xmm3 +; SSE2-NEXT: psrlq $48, %xmm3 +; SSE2-NEXT: movdqa %xmm0, %xmm4 +; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm0[1,1] +; SSE2-NEXT: pextrw $0, %xmm4, %ebp +; SSE2-NEXT: movdqa %xmm1, %xmm4 +; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[1,1] +; SSE2-NEXT: pextrw $0, %xmm4, %r15d +; SSE2-NEXT: pextrw $0, %xmm0, %r12d +; SSE2-NEXT: pextrw $0, %xmm1, %r13d +; SSE2-NEXT: psrld $16, %xmm0 +; SSE2-NEXT: pextrw $0, %xmm0, %eax +; SSE2-NEXT: psrld $16, %xmm1 +; SSE2-NEXT: pextrw $0, %xmm1, %ecx ; SSE2-NEXT: shll $16, %ecx -; SSE2-NEXT: movd %ecx, %xmm3 +; SSE2-NEXT: movd %ecx, %xmm1 ; SSE2-NEXT: shll $16, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: testl %ecx, %ecx -; SSE2-NEXT: movdqa %xmm3, %xmm7 -; SSE2-NEXT: js .LBB34_2 -; SSE2-NEXT: # %bb.1: -; SSE2-NEXT: movdqa %xmm2, %xmm7 -; SSE2-NEXT: .LBB34_2: -; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm1, %xmm5 -; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm1[1,1] -; SSE2-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill -; SSE2-NEXT: movdqa %xmm0, %xmm6 -; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,1],xmm0[1,1] -; SSE2-NEXT: movdqa %xmm7, %xmm0 -; SSE2-NEXT: cmpordss %xmm7, %xmm0 -; SSE2-NEXT: movaps %xmm0, %xmm4 -; SSE2-NEXT: andps %xmm7, %xmm4 -; SSE2-NEXT: js .LBB34_4 -; SSE2-NEXT: # %bb.3: -; SSE2-NEXT: movdqa %xmm3, %xmm2 -; SSE2-NEXT: .LBB34_4: -; SSE2-NEXT: pextrw $0, %xmm5, %ebp -; SSE2-NEXT: pextrw $0, %xmm6, %ebx -; SSE2-NEXT: maxss %xmm2, %xmm7 -; SSE2-NEXT: andnps %xmm7, %xmm0 -; SSE2-NEXT: orps %xmm4, %xmm0 +; SSE2-NEXT: movd %eax, %xmm4 +; SSE2-NEXT: js .LBB34_1 +; SSE2-NEXT: # %bb.2: +; SSE2-NEXT: movdqa %xmm4, %xmm0 +; SSE2-NEXT: jmp .LBB34_3 +; SSE2-NEXT: .LBB34_1: +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: 
movdqa %xmm4, %xmm1 +; SSE2-NEXT: .LBB34_3: +; SSE2-NEXT: pextrw $0, %xmm2, %ebx +; SSE2-NEXT: pextrw $0, %xmm3, %r14d +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: maxss %xmm0, %xmm2 +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: cmpunordss %xmm2, %xmm0 +; SSE2-NEXT: movaps %xmm0, %xmm3 +; SSE2-NEXT: andnps %xmm2, %xmm3 +; SSE2-NEXT: andps %xmm1, %xmm0 +; SSE2-NEXT: orps %xmm3, %xmm0 ; SSE2-NEXT: callq __truncsfbf2@PLT ; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE2-NEXT: shll $16, %r15d -; SSE2-NEXT: movd %r15d, %xmm3 -; SSE2-NEXT: shll $16, %r14d -; SSE2-NEXT: movd %r14d, %xmm2 -; SSE2-NEXT: testl %r15d, %r15d -; SSE2-NEXT: movdqa %xmm3, %xmm1 -; SSE2-NEXT: js .LBB34_6 +; SSE2-NEXT: shll $16, %r13d +; SSE2-NEXT: movd %r13d, %xmm1 +; SSE2-NEXT: shll $16, %r12d +; SSE2-NEXT: movd %r12d, %xmm2 +; SSE2-NEXT: js .LBB34_4 ; SSE2-NEXT: # %bb.5: +; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: jmp .LBB34_6 +; SSE2-NEXT: .LBB34_4: +; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: movdqa %xmm2, %xmm1 ; SSE2-NEXT: .LBB34_6: -; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload -; SSE2-NEXT: psrlq $48, %xmm5 -; SSE2-NEXT: movdqa (%rsp), %xmm6 # 16-byte Reload -; SSE2-NEXT: psrlq $48, %xmm6 -; SSE2-NEXT: movdqa %xmm1, %xmm0 -; SSE2-NEXT: cmpordss %xmm1, %xmm0 -; SSE2-NEXT: movaps %xmm0, %xmm4 -; SSE2-NEXT: andps %xmm1, %xmm4 -; SSE2-NEXT: js .LBB34_8 -; SSE2-NEXT: # %bb.7: -; SSE2-NEXT: movdqa %xmm3, %xmm2 -; SSE2-NEXT: .LBB34_8: -; SSE2-NEXT: pextrw $0, %xmm5, %r15d -; SSE2-NEXT: pextrw $0, %xmm6, %r14d -; SSE2-NEXT: maxss %xmm2, %xmm1 -; SSE2-NEXT: andnps %xmm1, %xmm0 -; SSE2-NEXT: orps %xmm4, %xmm0 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: maxss %xmm0, %xmm2 +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: cmpunordss %xmm2, %xmm0 +; SSE2-NEXT: movaps %xmm0, %xmm3 +; SSE2-NEXT: andnps %xmm2, %xmm3 +; SSE2-NEXT: andps %xmm1, %xmm0 +; SSE2-NEXT: orps %xmm3, %xmm0 ; SSE2-NEXT: callq __truncsfbf2@PLT ; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; SSE2-NEXT: shll $16, %ebx -; SSE2-NEXT: movd %ebx, %xmm1 +; SSE2-NEXT: shll $16, %r15d +; SSE2-NEXT: movd %r15d, %xmm1 ; SSE2-NEXT: shll $16, %ebp -; SSE2-NEXT: movd %ebp, %xmm3 -; SSE2-NEXT: testl %ebx, %ebx -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: js .LBB34_10 -; SSE2-NEXT: # %bb.9: -; SSE2-NEXT: movdqa %xmm3, %xmm2 -; SSE2-NEXT: .LBB34_10: +; SSE2-NEXT: movd %ebp, %xmm2 +; SSE2-NEXT: js .LBB34_7 +; SSE2-NEXT: # %bb.8: ; SSE2-NEXT: movdqa %xmm2, %xmm0 -; SSE2-NEXT: cmpordss %xmm2, %xmm0 -; SSE2-NEXT: movaps %xmm0, %xmm4 -; SSE2-NEXT: andps %xmm2, %xmm4 -; SSE2-NEXT: js .LBB34_12 -; SSE2-NEXT: # %bb.11: -; SSE2-NEXT: movdqa %xmm1, %xmm3 -; SSE2-NEXT: .LBB34_12: -; SSE2-NEXT: maxss %xmm3, %xmm2 -; SSE2-NEXT: andnps %xmm2, %xmm0 -; SSE2-NEXT: orps %xmm4, %xmm0 +; SSE2-NEXT: jmp .LBB34_9 +; SSE2-NEXT: .LBB34_7: +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: movdqa %xmm2, %xmm1 +; SSE2-NEXT: .LBB34_9: +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: maxss %xmm0, %xmm2 +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: cmpunordss %xmm2, %xmm0 +; SSE2-NEXT: movaps %xmm0, %xmm3 +; SSE2-NEXT: andnps %xmm2, %xmm3 +; SSE2-NEXT: andps %xmm1, %xmm0 +; SSE2-NEXT: orps %xmm3, %xmm0 ; SSE2-NEXT: callq __truncsfbf2@PLT ; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill ; SSE2-NEXT: shll $16, %r14d ; SSE2-NEXT: movd %r14d, %xmm1 -; SSE2-NEXT: shll $16, %r15d -; SSE2-NEXT: movd %r15d, %xmm3 -; SSE2-NEXT: testl %r14d, %r14d -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: js .LBB34_14 -; SSE2-NEXT: # 
%bb.13: -; SSE2-NEXT: movdqa %xmm3, %xmm2 -; SSE2-NEXT: .LBB34_14: +; SSE2-NEXT: shll $16, %ebx +; SSE2-NEXT: movd %ebx, %xmm2 +; SSE2-NEXT: js .LBB34_10 +; SSE2-NEXT: # %bb.11: ; SSE2-NEXT: movdqa %xmm2, %xmm0 -; SSE2-NEXT: cmpordss %xmm2, %xmm0 -; SSE2-NEXT: movaps %xmm0, %xmm4 -; SSE2-NEXT: andps %xmm2, %xmm4 -; SSE2-NEXT: js .LBB34_16 -; SSE2-NEXT: # %bb.15: -; SSE2-NEXT: movdqa %xmm1, %xmm3 -; SSE2-NEXT: .LBB34_16: -; SSE2-NEXT: maxss %xmm3, %xmm2 -; SSE2-NEXT: andnps %xmm2, %xmm0 -; SSE2-NEXT: orps %xmm4, %xmm0 +; SSE2-NEXT: jmp .LBB34_12 +; SSE2-NEXT: .LBB34_10: +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: movdqa %xmm2, %xmm1 +; SSE2-NEXT: .LBB34_12: +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: maxss %xmm0, %xmm2 +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: cmpunordss %xmm2, %xmm0 +; SSE2-NEXT: movaps %xmm0, %xmm3 +; SSE2-NEXT: andnps %xmm2, %xmm3 +; SSE2-NEXT: andps %xmm1, %xmm0 +; SSE2-NEXT: orps %xmm3, %xmm0 ; SSE2-NEXT: callq __truncsfbf2@PLT ; SSE2-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] @@ -2164,6 +2153,8 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: addq $56, %rsp ; SSE2-NEXT: popq %rbx +; SSE2-NEXT: popq %r12 +; SSE2-NEXT: popq %r13 ; SSE2-NEXT: popq %r14 ; SSE2-NEXT: popq %r15 ; SSE2-NEXT: popq %rbp @@ -2205,7 +2196,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n ; AVX1-NEXT: vpextrw $0, %xmm2, %ebp ; AVX1-NEXT: vpextrw $0, %xmm3, %r15d ; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1 -; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: callq __truncsfbf2@PLT ; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -2222,7 +2213,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n ; AVX1-NEXT: vmovdqa %xmm2, %xmm0 ; AVX1-NEXT: .LBB34_6: ; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1 -; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: callq __truncsfbf2@PLT ; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -2239,7 +2230,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n ; AVX1-NEXT: vmovdqa %xmm2, %xmm0 ; AVX1-NEXT: .LBB34_9: ; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1 -; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: callq __truncsfbf2@PLT ; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill @@ -2256,7 +2247,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n ; AVX1-NEXT: vmovdqa %xmm2, %xmm0 ; AVX1-NEXT: .LBB34_12: ; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1 -; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: callq __truncsfbf2@PLT ; AVX1-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload @@ -2305,7 +2296,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n ; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1} ; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1} ; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0 -; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1 +; AVX512-NEXT: vcmpunordss 
%xmm0, %xmm0, %k1 ; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} ; AVX512-NEXT: callq __truncsfbf2@PLT ; AVX512-NEXT: vpextrw $0, %xmm0, {{[0-9]+}}(%rsp) @@ -2319,7 +2310,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n ; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1} ; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1} ; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0 -; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1 +; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1 ; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} ; AVX512-NEXT: callq __truncsfbf2@PLT ; AVX512-NEXT: vpextrw $0, %xmm0, (%rsp) @@ -2333,7 +2324,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n ; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1} ; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1} ; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0 -; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1 +; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1 ; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} ; AVX512-NEXT: callq __truncsfbf2@PLT ; AVX512-NEXT: vpextrw $0, %xmm0, {{[0-9]+}}(%rsp) @@ -2347,7 +2338,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n ; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1} ; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1} ; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0 -; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1 +; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1 ; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} ; AVX512-NEXT: callq __truncsfbf2@PLT ; AVX512-NEXT: vpextrw $0, %xmm0, {{[0-9]+}}(%rsp) @@ -2400,7 +2391,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n ; X86-NEXT: vpextrw $0, %xmm2, %edi ; X86-NEXT: vpextrw $0, %xmm3, %ebp ; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1 -; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: vmovss %xmm0, (%esp) ; X86-NEXT: shll $16, %ecx @@ -2416,7 +2407,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n ; X86-NEXT: vmovdqa %xmm2, %xmm0 ; X86-NEXT: .LBB34_6: ; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1 -; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill ; X86-NEXT: calll __truncsfbf2 @@ -2436,7 +2427,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n ; X86-NEXT: vmovdqa %xmm2, %xmm0 ; X86-NEXT: .LBB34_9: ; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1 -; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill ; X86-NEXT: calll __truncsfbf2 @@ -2456,7 +2447,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n ; X86-NEXT: vmovdqa %xmm2, %xmm0 ; X86-NEXT: .LBB34_12: ; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1 -; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2 +; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2 ; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0 ; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill ; X86-NEXT: calll __truncsfbf2 diff --git a/llvm/test/CodeGen/X86/fp-undef.ll b/llvm/test/CodeGen/X86/fp-undef.ll index 227f007..c358085 100644 --- a/llvm/test/CodeGen/X86/fp-undef.ll +++ b/llvm/test/CodeGen/X86/fp-undef.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: 
llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ANY -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -enable-unsafe-fp-math | FileCheck %s --check-prefix=ANY +; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ANY ; This is duplicated from tests for InstSimplify. If you're ; adding something here, you should probably add it there too. diff --git a/llvm/test/CodeGen/X86/fp128-select.ll b/llvm/test/CodeGen/X86/fp128-select.ll index 659e4dd..27a651e 100644 --- a/llvm/test/CodeGen/X86/fp128-select.ll +++ b/llvm/test/CodeGen/X86/fp128-select.ll @@ -13,8 +13,8 @@ define void @test_select(ptr %p, ptr %q, i1 zeroext %c) nounwind { ; SSE: # %bb.0: ; SSE-NEXT: testl %edx, %edx ; SSE-NEXT: jne .LBB0_1 -; SSE-NEXT: # %bb.3: -; SSE-NEXT: movaps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE-NEXT: # %bb.2: +; SSE-NEXT: movaps {{.*#+}} xmm0 = [NaN] ; SSE-NEXT: movaps %xmm0, (%rsi) ; SSE-NEXT: retq ; SSE-NEXT: .LBB0_1: @@ -58,7 +58,7 @@ define fp128 @test_select_cc(fp128, fp128) nounwind { ; SSE-NEXT: xorps %xmm1, %xmm1 ; SSE-NEXT: jmp .LBB1_3 ; SSE-NEXT: .LBB1_1: -; SSE-NEXT: movaps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE-NEXT: movaps {{.*#+}} xmm1 = [1.0E+0] ; SSE-NEXT: .LBB1_3: # %BB0 ; SSE-NEXT: testl %ebx, %ebx ; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload diff --git a/llvm/test/CodeGen/X86/fsxor-alignment.ll b/llvm/test/CodeGen/X86/fsxor-alignment.ll index 6fa4a31..32af5b9 100644 --- a/llvm/test/CodeGen/X86/fsxor-alignment.ll +++ b/llvm/test/CodeGen/X86/fsxor-alignment.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 -enable-unsafe-fp-math | FileCheck %s +; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | FileCheck %s ; Don't fold the incoming stack arguments into the xorps instructions used ; to do floating-point negations, because the arguments aren't vectors diff --git a/llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll b/llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll index f710a30..bd997d1 100644 --- a/llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll +++ b/llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=sse -enable-unsafe-fp-math < %s | FileCheck %s +; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=sse < %s | FileCheck %s ; The debug info in this test case was causing a crash because machine trace metrics ; did not correctly ignore debug instructions. 
The check lines ensure that the diff --git a/llvm/test/CodeGen/X86/neg_fp.ll b/llvm/test/CodeGen/X86/neg_fp.ll index 8020982..18ded50 100644 --- a/llvm/test/CodeGen/X86/neg_fp.ll +++ b/llvm/test/CodeGen/X86/neg_fp.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=i686-- -mattr=+sse4.1 | FileCheck %s -; Test that when we don't -enable-unsafe-fp-math, we don't do the optimization +; Test that we don't do the optimization ; -0 - (A - B) to (B - A) because A==B, -0 != 0 define float @negfp(float %a, float %b) nounwind { diff --git a/llvm/test/CodeGen/X86/negate-add-zero.ll b/llvm/test/CodeGen/X86/negate-add-zero.ll index eb4e2d3..4884832 100644 --- a/llvm/test/CodeGen/X86/negate-add-zero.ll +++ b/llvm/test/CodeGen/X86/negate-add-zero.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -enable-unsafe-fp-math | FileCheck %s +; RUN: llc < %s | FileCheck %s ; PR3374 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128" diff --git a/llvm/test/CodeGen/X86/recip-pic.ll b/llvm/test/CodeGen/X86/recip-pic.ll index d01ecc1..d2620e7 100644 --- a/llvm/test/CodeGen/X86/recip-pic.ll +++ b/llvm/test/CodeGen/X86/recip-pic.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -enable-unsafe-fp-math -mcpu=slm -relocation-model=pic | FileCheck %s --check-prefix=CHECK +; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mcpu=slm -relocation-model=pic | FileCheck %s --check-prefix=CHECK define fastcc float @foo(float %x) unnamed_addr #0 { ; CHECK-LABEL: foo: diff --git a/llvm/test/CodeGen/X86/sincos-opt.ll b/llvm/test/CodeGen/X86/sincos-opt.ll index 6885456..51f3e52 100644 --- a/llvm/test/CodeGen/X86/sincos-opt.ll +++ b/llvm/test/CodeGen/X86/sincos-opt.ll @@ -1,10 +1,10 @@ ; RUN: llc < %s -mtriple=x86_64-apple-macosx10.9.0 -mcpu=core2 | FileCheck %s --check-prefix=OSX_SINCOS ; RUN: llc < %s -mtriple=x86_64-apple-macosx10.8.0 -mcpu=core2 | FileCheck %s --check-prefix=OSX_NOOPT ; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS -; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -mcpu=core2 -enable-unsafe-fp-math | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH -; RUN: llc < %s -mtriple=x86_64-pc-linux-gnux32 -mcpu=core2 -enable-unsafe-fp-math | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH +; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH +; RUN: llc < %s -mtriple=x86_64-pc-linux-gnux32 -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH ; RUN: llc < %s -mtriple=x86_64-fuchsia -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS -; RUN: llc < %s -mtriple=x86_64-fuchsia -mcpu=core2 -enable-unsafe-fp-math | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH +; RUN: llc < %s -mtriple=x86_64-fuchsia -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH ; RUN: llc < %s -mtriple=x86_64-scei-ps4 -mcpu=btver2 | FileCheck %s --check-prefix=PS4_SINCOS ; RUN: llc < %s -mtriple=x86_64-sie-ps5 -mcpu=znver2 | FileCheck %s --check-prefix=PS4_SINCOS diff --git a/llvm/test/CodeGen/X86/sincos.ll b/llvm/test/CodeGen/X86/sincos.ll index 7903407..9206c25 100644 --- a/llvm/test/CodeGen/X86/sincos.ll +++ b/llvm/test/CodeGen/X86/sincos.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by
utils/update_llc_test_checks.py ; Make sure this testcase codegens to the sin and cos instructions, not calls -; RUN: llc < %s -mtriple=i686-apple-macosx -mattr=-sse,-sse2,-sse3 -enable-unsafe-fp-math | FileCheck %s +; RUN: llc < %s -mtriple=i686-apple-macosx -mattr=-sse,-sse2,-sse3 | FileCheck %s ; RUN: llc < %s -mtriple=i686-apple-macosx -mattr=-sse,-sse2,-sse3 | FileCheck %s declare float @sinf(float) readonly diff --git a/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll b/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll index c0beb6f..2822d40 100644 --- a/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll +++ b/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll @@ -1,9 +1,9 @@ -; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math | FileCheck %s --check-prefix=CST --check-prefix=SSE2 -; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+sse4.1 | FileCheck %s --check-prefix=CST --check-prefix=SSE41 -; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx | FileCheck %s --check-prefix=CST --check-prefix=AVX -; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx2 | FileCheck %s --check-prefix=AVX2 -; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F -; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx512vl | FileCheck %s --check-prefix=AVX512VL +; RUN: llc < %s -mtriple=x86_64 | FileCheck %s --check-prefix=CST --check-prefix=SSE2 +; RUN: llc < %s -mtriple=x86_64 -mattr=+sse4.1 | FileCheck %s --check-prefix=CST --check-prefix=SSE41 +; RUN: llc < %s -mtriple=x86_64 -mattr=+avx | FileCheck %s --check-prefix=CST --check-prefix=AVX +; RUN: llc < %s -mtriple=x86_64 -mattr=+avx2 | FileCheck %s --check-prefix=AVX2 +; RUN: llc < %s -mtriple=x86_64 -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F +; RUN: llc < %s -mtriple=x86_64 -mattr=+avx512vl | FileCheck %s --check-prefix=AVX512VL ; Check that the constant used in the vectors are the right ones. 
; SSE2: [[MASKCSTADDR:.LCPI[0-9_]+]]: diff --git a/llvm/test/Instrumentation/AllocToken/basic.ll b/llvm/test/Instrumentation/AllocToken/basic.ll index 099d37d..0c34b137 100644 --- a/llvm/test/Instrumentation/AllocToken/basic.ll +++ b/llvm/test/Instrumentation/AllocToken/basic.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt < %s -passes=inferattrs,alloc-token -alloc-token-mode=increment -S | FileCheck %s +; RUN: opt < %s -passes='inferattrs,alloc-token<mode=increment>' -S | FileCheck %s target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Instrumentation/AllocToken/basic32.ll b/llvm/test/Instrumentation/AllocToken/basic32.ll index 944a452..52d1d14 100644 --- a/llvm/test/Instrumentation/AllocToken/basic32.ll +++ b/llvm/test/Instrumentation/AllocToken/basic32.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt < %s -passes=inferattrs,alloc-token -alloc-token-mode=increment -S | FileCheck %s +; RUN: opt < %s -passes='inferattrs,alloc-token<mode=increment>' -S | FileCheck %s target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128" diff --git a/llvm/test/Instrumentation/AllocToken/fast.ll b/llvm/test/Instrumentation/AllocToken/fast.ll index 19a3ef6..f6bf5ee 100644 --- a/llvm/test/Instrumentation/AllocToken/fast.ll +++ b/llvm/test/Instrumentation/AllocToken/fast.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt < %s -passes=inferattrs,alloc-token -alloc-token-mode=increment -alloc-token-fast-abi -alloc-token-max=3 -S | FileCheck %s +; RUN: opt < %s -passes='inferattrs,alloc-token<mode=increment>' -alloc-token-fast-abi -alloc-token-max=3 -S | FileCheck %s target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Instrumentation/AllocToken/intrinsic.ll b/llvm/test/Instrumentation/AllocToken/intrinsic.ll index 13aaa90..5c6f2f1 100644 --- a/llvm/test/Instrumentation/AllocToken/intrinsic.ll +++ b/llvm/test/Instrumentation/AllocToken/intrinsic.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; Test that the alloc-token pass lowers the intrinsic to a constant token ID. ; -; RUN: opt < %s -passes=alloc-token -alloc-token-mode=typehashpointersplit -alloc-token-max=2 -S | FileCheck %s +; RUN: opt < %s -passes='alloc-token<mode=typehashpointersplit>' -alloc-token-max=2 -S | FileCheck %s target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Instrumentation/AllocToken/intrinsic32.ll b/llvm/test/Instrumentation/AllocToken/intrinsic32.ll index eb5dbbe..15f7c25 100644 --- a/llvm/test/Instrumentation/AllocToken/intrinsic32.ll +++ b/llvm/test/Instrumentation/AllocToken/intrinsic32.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; Test that the alloc-token pass lowers the intrinsic to a constant token ID. 
; -; RUN: opt < %s -passes=alloc-token -alloc-token-mode=typehashpointersplit -alloc-token-max=2 -S | FileCheck %s +; RUN: opt < %s -passes='alloc-token<mode=typehashpointersplit>' -alloc-token-max=2 -S | FileCheck %s target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128" target triple = "i386-pc-linux-gnu" diff --git a/llvm/test/Instrumentation/AllocToken/invoke.ll b/llvm/test/Instrumentation/AllocToken/invoke.ll index 347c99a..8e7ab38 100644 --- a/llvm/test/Instrumentation/AllocToken/invoke.ll +++ b/llvm/test/Instrumentation/AllocToken/invoke.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt < %s -passes=inferattrs,alloc-token -alloc-token-mode=increment -S | FileCheck %s +; RUN: opt < %s -passes='inferattrs,alloc-token<mode=increment>' -S | FileCheck %s target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Instrumentation/AllocToken/nonlibcalls.ll b/llvm/test/Instrumentation/AllocToken/nonlibcalls.ll index 19673da..45f573e 100644 --- a/llvm/test/Instrumentation/AllocToken/nonlibcalls.ll +++ b/llvm/test/Instrumentation/AllocToken/nonlibcalls.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt < %s -passes=inferattrs,alloc-token -alloc-token-mode=increment -alloc-token-extended -S | FileCheck %s +; RUN: opt < %s -passes='inferattrs,alloc-token<mode=increment>' -alloc-token-extended -S | FileCheck %s target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Instrumentation/AllocToken/typehashpointersplit.ll b/llvm/test/Instrumentation/AllocToken/typehashpointersplit.ll index 1f77648..4d1be5e 100644 --- a/llvm/test/Instrumentation/AllocToken/typehashpointersplit.ll +++ b/llvm/test/Instrumentation/AllocToken/typehashpointersplit.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt < %s -passes=inferattrs,alloc-token -alloc-token-mode=typehashpointersplit -alloc-token-max=2 -S | FileCheck %s +; RUN: opt < %s -passes='inferattrs,alloc-token<mode=typehashpointersplit>' -alloc-token-max=2 -S | FileCheck %s target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll index 1ddcd4b..1c869bd 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -passes=msan -mattr=+sme -o - %s | FileCheck %s +; RUN: opt -S -passes=msan -mattr=+sme -o - %s ; XFAIL: * diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount.ll index 9caa89d..00cf3204 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -passes=msan -mattr=+sme -o - %s | FileCheck 
%s +; RUN: opt -S -passes=msan -mattr=+sme -o - %s ; XFAIL: * diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll new file mode 100644 index 0000000..3f43efa --- /dev/null +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll @@ -0,0 +1,16 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S -passes=msan -mattr=+sme2 -mattr=+sme-i16i64 -mattr=+sme-f64f64 -o - %s + +; XFAIL: * + +; Forked from llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll +; Manually reduced to show MSan leads to a compiler crash + +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64--linux-android9001" + +define void @multi_vector_add_za_vg1x4_f32_tuple(i64 %stride, ptr %ptr) sanitize_memory { + %1 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() + %2 = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %1, ptr %ptr) + ret void +} diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add.ll new file mode 100644 index 0000000..cd04373 --- /dev/null +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add.ll @@ -0,0 +1,340 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S -passes=msan -mattr=+sme2 -mattr=+sme-i16i64 -mattr=+sme-f64f64 -o - %s + +; XFAIL: * + +; Forked from llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll + +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64--linux-android9001" + +define void @multi_vector_add_write_single_za_vg1x2_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zm) sanitize_memory { + call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32 %slice, + <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, + <vscale x 4 x i32> %zm) + %slice.7 = add i32 %slice, 7 + call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32 %slice.7, + <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, + <vscale x 4 x i32> %zm) + ret void +} + +define void @multi_vector_add_write_single_za_vg1x2_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zm) sanitize_memory { + call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv2i64(i32 %slice, + <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, + <vscale x 2 x i64> %zm) + %slice.7 = add i32 %slice, 7 + call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv2i64(i32 %slice.7, + <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, + <vscale x 2 x i64> %zm) + ret void +} + + +define void @multi_vector_add_write_single_za_vg1x4_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, + <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, + <vscale x 4 x i32> %zm) sanitize_memory { + call void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv4i32(i32 %slice, + <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, + <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, + <vscale x 4 x i32> %zm) + %slice.7 = add i32 %slice, 7 + call void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv4i32(i32 %slice.7, + <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, + <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, + <vscale x 4 x i32> 
%zm) + ret void +} + +define void @multi_vector_add_write_single_za_vg1x4_i64(i32 %slice, + <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, + <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, + <vscale x 2 x i64> %zm) sanitize_memory { + call void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv2i64(i32 %slice, + <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, + <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, + <vscale x 2 x i64> %zm) + %slice.7 = add i32 %slice, 7 + call void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv2i64(i32 %slice.7, + <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, + <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, + <vscale x 2 x i64> %zm) + ret void +} + + +define void @multi_vector_add_write_za_vg1x2_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, + <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2) sanitize_memory { + call void @llvm.aarch64.sme.add.write.za.vg1x2.nxv4i32(i32 %slice, + <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, + <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2) + %slice.7 = add i32 %slice, 7 + call void @llvm.aarch64.sme.add.write.za.vg1x2.nxv4i32(i32 %slice.7, + <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, + <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2) + ret void +} + + +define void @multi_vector_add_write_za_vg1x2_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, + <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2) sanitize_memory { + call void @llvm.aarch64.sme.add.write.za.vg1x2.nxv2i64(i32 %slice, + <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, + <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2) + %slice.7 = add i32 %slice, 7 + call void @llvm.aarch64.sme.add.write.za.vg1x2.nxv2i64(i32 %slice.7, + <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, + <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2) + ret void +} + + + +define void @multi_vector_add_write_za_vg1x4_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, + <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, + <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1, + <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3) sanitize_memory { + call void @llvm.aarch64.sme.add.write.za.vg1x4.nxv4i32(i32 %slice, + <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, + <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, + <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1, + <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3) + %slice.7 = add i32 %slice, 7 + call void @llvm.aarch64.sme.add.write.za.vg1x4.nxv4i32(i32 %slice.7, + <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, + <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, + <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1, + <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3) + ret void +} + +define void @multi_vector_add_write_za_vg1x4_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, + <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, + <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1, + <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3) sanitize_memory { + call void @llvm.aarch64.sme.add.write.za.vg1x4.nxv2i64(i32 %slice, + <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, + <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, + <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1, + <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3) + %slice.7 = add i32 %slice, 7 + call void @llvm.aarch64.sme.add.write.za.vg1x4.nxv2i64(i32 %slice.7, + <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, + <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, + <vscale x 2 x i64> %zm0, <vscale 
x 2 x i64> %zm1, + <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3) + ret void +} + +define void @multi_vector_add_za_vg1x2_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1) sanitize_memory { + call void @llvm.aarch64.sme.add.za32.vg1x2.nxv4i32(i32 %slice,<vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1) + %slice.7 = add i32 %slice, 7 + call void @llvm.aarch64.sme.add.za32.vg1x2.nxv4i32(i32 %slice.7, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1) + ret void +} + +define void @multi_vector_add_za_vg1x2_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1) sanitize_memory { + call void @llvm.aarch64.sme.add.za64.vg1x2.nxv2i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1) + %slice.7 = add i32 %slice, 7 + call void @llvm.aarch64.sme.add.za64.vg1x2.nxv2i64(i32 %slice.7, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1) + ret void +} + +define void @multi_vector_add_za_vg1x2_f32(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1) sanitize_memory { + call void @llvm.aarch64.sme.add.za32.vg1x2.nxv4f32(i32 %slice, + <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1) + %slice.7 = add i32 %slice, 7 + call void @llvm.aarch64.sme.add.za32.vg1x2.nxv4f32(i32 %slice.7, + <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1) + ret void +} + +define void @multi_vector_add_za_vg1x2_f64(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1) sanitize_memory { + call void @llvm.aarch64.sme.add.za64.vg1x2.nxv2f64(i32 %slice, + <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1) + %slice.7 = add i32 %slice, 7 + call void @llvm.aarch64.sme.add.za64.vg1x2.nxv2f64(i32 %slice.7, + <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1) + ret void +} + +define void @multi_vector_add_za_vg1x2_f64_tuple(i64 %stride, ptr %ptr) sanitize_memory { +entry: + %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() + %1 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld1.pn.x2.nxv2f64(target("aarch64.svcount") %0, ptr %ptr) + %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %1, 0 + %3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %1, 1 + %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride + %4 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld1.pn.x2.nxv2f64(target("aarch64.svcount") %0, ptr %arrayidx2) + %5 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %4, 0 + %6 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %4, 1 + call void @llvm.aarch64.sme.add.za64.vg1x2.nxv2f64(i32 0, <vscale x 2 x double> %2, <vscale x 2 x double> %5) + call void @llvm.aarch64.sme.add.za64.vg1x2.nxv2f64(i32 0, <vscale x 2 x double> %3, <vscale x 2 x double> %6) + ret void +} + + +define void @multi_vector_add_za_vg1x4_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3) sanitize_memory { + call void @llvm.aarch64.sme.add.za32.vg1x4.nxv4i32(i32 %slice, + <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, + <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3) + %slice.7 = add i32 %slice, 7 + call void @llvm.aarch64.sme.add.za32.vg1x4.nxv4i32(i32 %slice.7, + <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, + <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3) + ret void +} + +define void @multi_vector_add_za_vg1x4_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3) sanitize_memory { + call void 
@llvm.aarch64.sme.add.za64.vg1x4.nxv2i64(i32 %slice, + <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, + <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3) + %slice.7 = add i32 %slice, 7 + call void @llvm.aarch64.sme.add.za64.vg1x4.nxv2i64(i32 %slice.7, + <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, + <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3) + ret void +} + +define void @multi_vector_add_za_vg1x4_f32(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3) sanitize_memory { + call void @llvm.aarch64.sme.add.za32.vg1x4.nxv4f32(i32 %slice, + <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, + <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3) + %slice.7 = add i32 %slice, 7 + call void @llvm.aarch64.sme.add.za32.vg1x4.nxv4f32(i32 %slice.7, + <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, + <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3) + ret void +} + +define void @multi_vector_add_za_vg1x4_f32_tuple(i64 %stride, ptr %ptr) sanitize_memory { +entry: + %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8() + %1 = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %0, ptr %ptr) + %2 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %1, 0 + %3 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %1, 1 + %4 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %1, 2 + %5 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %1, 3 + %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride + %6 = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %0, ptr %arrayidx2) + %7 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %6, 0 + %8 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %6, 1 + %9 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %6, 2 + %10 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %6, 3 + %mul3 = shl i64 %stride, 1 + %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3 + %11 = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %0, ptr %arrayidx4) + %12 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %11, 0 + %13 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %11, 1 + %14 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %11, 2 + %15 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %11, 3 + %mul5 = mul i64 %stride, 3 + %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5 + %16 = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %0, ptr %arrayidx6) + %17 = 
extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %16, 0 + %18 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %16, 1 + %19 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %16, 2 + %20 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %16, 3 + call void @llvm.aarch64.sme.add.za32.vg1x4.nxv4f32(i32 0, <vscale x 4 x float> %2, <vscale x 4 x float> %7, <vscale x 4 x float> %12, <vscale x 4 x float> %17) + call void @llvm.aarch64.sme.add.za32.vg1x4.nxv4f32(i32 0, <vscale x 4 x float> %3, <vscale x 4 x float> %8, <vscale x 4 x float> %13, <vscale x 4 x float> %18) + call void @llvm.aarch64.sme.add.za32.vg1x4.nxv4f32(i32 0, <vscale x 4 x float> %4, <vscale x 4 x float> %9, <vscale x 4 x float> %14, <vscale x 4 x float> %19) + call void @llvm.aarch64.sme.add.za32.vg1x4.nxv4f32(i32 0, <vscale x 4 x float> %5, <vscale x 4 x float> %10, <vscale x 4 x float> %15, <vscale x 4 x float> %20) + ret void +} + +define void @multi_vector_add_za_vg1x4_f64(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3) sanitize_memory { + call void @llvm.aarch64.sme.add.za64.vg1x4.nxv2f64(i32 %slice, + <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, + <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3) + %slice.7 = add i32 %slice, 7 + call void @llvm.aarch64.sme.add.za64.vg1x4.nxv2f64(i32 %slice.7, + <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, + <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3) + ret void +} + + +define { <vscale x 16 x i8>, <vscale x 16 x i8> } @multi_vec_add_single_x2_s8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2, <vscale x 16 x i8> %zm) sanitize_memory { + %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } + @llvm.aarch64.sve.add.single.x2.nxv16i8(<vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2, + <vscale x 16 x i8> %zm) + ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res +} + +define { <vscale x 8 x i16>, <vscale x 8 x i16> } @multi_vec_add_single_x2_s16(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %zdn1, <vscale x 8 x i16> %zdn2, <vscale x 8 x i16> %zm) sanitize_memory { + %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } + @llvm.aarch64.sve.add.single.x2.nxv8i16(<vscale x 8 x i16> %zdn1, <vscale x 8 x i16> %zdn2, + <vscale x 8 x i16> %zm) + ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res +} + +define { <vscale x 4 x i32>, <vscale x 4 x i32> } @multi_vec_add_single_x2_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zm) sanitize_memory { + %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } + @llvm.aarch64.sve.add.single.x2.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, + <vscale x 4 x i32> %zm) + ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res +} + +define { <vscale x 2 x i64>, <vscale x 2 x i64> } @multi_vec_add_single_x2_s64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zdn1, <vscale x 2 x i64> %zdn2, <vscale x 2 x i64> %zm) sanitize_memory { + %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } + @llvm.aarch64.sve.add.single.x2.nxv2i64(<vscale x 2 x i64> %zdn1, <vscale x 2 x i64> %zdn2, + <vscale x 2 x i64> %zm) + ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res +} + + +define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, 
<vscale x 16 x i8> } @multi_vec_add_single_x4_s8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2, <vscale x 16 x i8> %zdn3, <vscale x 16 x i8> %zdn4, <vscale x 16 x i8>%zm) sanitize_memory { + %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } + @llvm.aarch64.sve.add.single.x4.nxv16i8(<vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2, + <vscale x 16 x i8> %zdn3, <vscale x 16 x i8> %zdn4, + <vscale x 16 x i8> %zm) + ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res +} + +define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @multi_vec_add_x4_single_s16(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %zdn1, <vscale x 8 x i16> %zdn2, <vscale x 8 x i16> %zdn3, <vscale x 8 x i16> %zdn4, <vscale x 8 x i16> %zm) sanitize_memory { + %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } + @llvm.aarch64.sve.add.single.x4.nxv8i16(<vscale x 8 x i16> %zdn1, <vscale x 8 x i16> %zdn2, + <vscale x 8 x i16> %zdn3, <vscale x 8 x i16> %zdn4, + <vscale x 8 x i16> %zm) + ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res +} + +define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @multi_vec_add_x4_single_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zdn3, <vscale x 4 x i32> %zdn4, <vscale x 4 x i32> %zm) sanitize_memory { + %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } + @llvm.aarch64.sve.add.single.x4.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, + <vscale x 4 x i32> %zdn3, <vscale x 4 x i32> %zdn4, + <vscale x 4 x i32> %zm) + ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res +} + +define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @multi_vec_add_x4_single_s64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zdn1, <vscale x 2 x i64> %zdn2, <vscale x 2 x i64> %zdn3, <vscale x 2 x i64> %zdn4, <vscale x 2 x i64> %zm) sanitize_memory { + %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } + @llvm.aarch64.sve.add.single.x4.nxv2i64(<vscale x 2 x i64> %zdn1, <vscale x 2 x i64> %zdn2, + <vscale x 2 x i64> %zdn3, <vscale x 2 x i64> %zdn4, + <vscale x 2 x i64> %zm) + ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res +} +declare void@llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>) +declare void@llvm.aarch64.sme.add.write.single.za.vg1x2.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>) +declare void@llvm.aarch64.sme.add.write.single.za.vg1x4.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>) +declare void@llvm.aarch64.sme.add.write.single.za.vg1x4.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>) +declare void@llvm.aarch64.sme.add.write.za.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>) +declare void@llvm.aarch64.sme.add.write.za.vg1x2.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>) +declare void@llvm.aarch64.sme.add.write.za.vg1x4.nxv4i32(i32, <vscale x 4 x i32>, 
<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>) +declare void@llvm.aarch64.sme.add.write.za.vg1x4.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>) +declare void@llvm.aarch64.sme.add.za32.vg1x2.nxv4i32(i32, <vscale x 4 x i32>,<vscale x 4 x i32>) +declare void@llvm.aarch64.sme.add.za64.vg1x2.nxv2i64(i32, <vscale x 2 x i64>,<vscale x 2 x i64>) +declare void@llvm.aarch64.sme.add.za32.vg1x4.nxv4i32(i32, <vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>) +declare void@llvm.aarch64.sme.add.za64.vg1x4.nxv2i64(i32, <vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, <vscale x 2 x i64>) +declare void@llvm.aarch64.sme.add.za32.vg1x2.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>) +declare void@llvm.aarch64.sme.add.za64.vg1x2.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>) +declare void@llvm.aarch64.sme.add.za32.vg1x4.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>,<vscale x 4 x float>, <vscale x 4 x float>) +declare void@llvm.aarch64.sme.add.za64.vg1x4.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>,<vscale x 2 x double>, <vscale x 2 x double>) +declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.add.single.x2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>) +declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.add.single.x2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>) +declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.add.single.x2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>) +declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.add.single.x2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>) +declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.add.single.x4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>) +declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.add.single.x4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>) +declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.add.single.x4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>) +declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.add.single.x4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>) diff --git a/llvm/test/Instrumentation/NumericalStabilitySanitizer/basic.ll b/llvm/test/Instrumentation/NumericalStabilitySanitizer/basic.ll index 434ac84..3d759f7 100644 --- a/llvm/test/Instrumentation/NumericalStabilitySanitizer/basic.ll +++ b/llvm/test/Instrumentation/NumericalStabilitySanitizer/basic.ll @@ -865,33 +865,6 @@ entry: ret float %r } -; Note that the `unsafe-fp-math` from the function attributes should be moved to -; individual instructions, with the shadow instructions NOT getting the attribute. 
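; A minimal illustrative sketch (hypothetical function name, not part of the patch) of the
; convention the removed note above describes: relaxed FP semantics expressed as
; per-instruction fast-math flags instead of the function-level "unsafe-fp-math" attribute,
; while the widened shadow computation is emitted without those flags.
define float @sketch_fast_math_on_app_value_only(float %a) {
entry:
  %app = fadd fast float %a, 1.0       ; application value keeps the relaxed semantics
  %wide = fpext float %a to double
  %shadow = fadd double %wide, 1.0     ; shadow value stays strictly rounded
  ret float %app
}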
-define float @param_add_return_float_unsafe_fp_math(float %a) #0 { -; CHECK-LABEL: @param_add_return_float_unsafe_fp_math( -; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__nsan_shadow_args_tag, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 [[TMP0]], ptrtoint (ptr @param_add_return_float_unsafe_fp_math to i64) -; CHECK-NEXT: [[TMP2:%.*]] = load double, ptr @__nsan_shadow_args_ptr, align 1 -; CHECK-NEXT: [[TMP3:%.*]] = fpext float [[A:%.*]] to double -; CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP1]], double [[TMP2]], double [[TMP3]] -; CHECK-NEXT: store i64 0, ptr @__nsan_shadow_args_tag, align 8 -; CHECK-NEXT: [[B:%.*]] = fadd fast float [[A]], 1.000000e+00 -; CHECK-NEXT: [[TMP5:%.*]] = fadd double [[TMP4]], 1.000000e+00 -; CHECK-NEXT: [[TMP6:%.*]] = call i32 @__nsan_internal_check_float_d(float [[B]], double [[TMP5]], i32 1, i64 0) -; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[TMP6]], 1 -; CHECK-NEXT: [[TMP8:%.*]] = fpext float [[B]] to double -; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP7]], double [[TMP8]], double [[TMP5]] -; CHECK-NEXT: store i64 ptrtoint (ptr @param_add_return_float_unsafe_fp_math to i64), ptr @__nsan_shadow_ret_tag, align 8 -; CHECK-NEXT: store double [[TMP9]], ptr @__nsan_shadow_ret_ptr, align 8 -; CHECK-NEXT: ret float [[B]] -; -entry: - %b = fadd float %a, 1.0 - ret float %b -} - - define void @truncate(<2 x double> %0) sanitize_numerical_stability { ; DQQ-LABEL: @truncate( ; DQQ-NEXT: entry: @@ -941,4 +914,4 @@ entry: } -attributes #0 = { nounwind readonly uwtable sanitize_numerical_stability "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign,preserve-sign" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="true" "use-soft-float"="false" } +attributes #0 = { nounwind readonly uwtable sanitize_numerical_stability "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign,preserve-sign" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" } diff --git a/llvm/test/LTO/AArch64/Inputs/foo.ll b/llvm/test/LTO/AArch64/Inputs/foo.ll deleted file mode 100644 index 961b0d4..0000000 --- a/llvm/test/LTO/AArch64/Inputs/foo.ll +++ /dev/null @@ -1,16 +0,0 @@ -target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" -target triple = "aarch64-unknown-linux-gnu" - -define dso_local i32 @foo() #0 { -entry: - ret i32 42 -} - -attributes #0 = { noinline nounwind optnone uwtable } - -!llvm.module.flags = !{!0, !1, !2, !3} - -!0 = !{i32 8, !"branch-target-enforcement", i32 1} -!1 = !{i32 8, !"sign-return-address", i32 1} -!2 = !{i32 8, !"sign-return-address-all", i32 1} -!3 = !{i32 8, !"sign-return-address-with-bkey", i32 1} diff --git a/llvm/test/LTO/AArch64/TestInputs/bar.ll b/llvm/test/LTO/AArch64/TestInputs/bar.ll new file mode 100644 index 0000000..7c2a753 --- /dev/null +++ 
b/llvm/test/LTO/AArch64/TestInputs/bar.ll @@ -0,0 +1,35 @@ +;; This file contains the new semantic of the branch-target-enforcement, sign-return-address. +;; Used for test mixing a mixed link case and also verify the import too in llc. + +; RUN: llc -mattr=+pauth -mattr=+bti %s -o - | FileCheck %s + +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-unknown-linux-gnu" + +define dso_local void @bar() #0 { +entry: + ret void +} +; CHECK-LABEL: bar: +; CHECK-NOT: hint +; CHECK-NOT: bti +; CHECK: ret + +define dso_local void @baz() #1 { +entry: + ret void +} + +; CHECK-LABEL: baz: +; CHECK: bti c +; CHECK: ret + +attributes #0 = { noinline nounwind optnone uwtable } +attributes #1 = { noinline nounwind optnone uwtable "branch-target-enforcement" } + +!llvm.module.flags = !{!0, !1, !2, !3} + +!0 = !{i32 8, !"branch-target-enforcement", i32 2} +!1 = !{i32 8, !"sign-return-address", i32 2} +!2 = !{i32 8, !"sign-return-address-all", i32 2} +!3 = !{i32 8, !"sign-return-address-with-bkey", i32 2} diff --git a/llvm/test/LTO/AArch64/TestInputs/fiz.ll b/llvm/test/LTO/AArch64/TestInputs/fiz.ll new file mode 100644 index 0000000..e578426 --- /dev/null +++ b/llvm/test/LTO/AArch64/TestInputs/fiz.ll @@ -0,0 +1,41 @@ +;; This file contains the previous semantic of the branch-target-enforcement, sign-return-address. +;; Used for test mixing a mixed link case and also verify the import too in llc. + +; RUN: llc -mattr=+pauth -mattr=+bti %s -o - | FileCheck %s + +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-unknown-linux-gnu" + +declare void @func() + +define i32 @fiz_on() #0 { +entry: + call void @func() + ret i32 42 +} + +; CHECK-LABEL: fiz_on: +; CHECK: paciasp +; CHECK: bl func +; CHECK: retaa + +define i32 @fiz_off() #1 { +entry: + ret i32 43 +} + +; CHECK-LABEL: fiz_off: +; CHECK-NOT: pac +; CHECK-NOT: hint +; CHECK-NOT: bti +; CHECK: ret + +attributes #0 = { noinline nounwind optnone uwtable } +attributes #1 = { noinline nounwind optnone uwtable "branch-target-enforcement"="false" "sign-return-address"="none" } + +!llvm.module.flags = !{!0, !1, !2, !3} + +!0 = !{i32 8, !"branch-target-enforcement", i32 1} +!1 = !{i32 8, !"sign-return-address", i32 1} +!2 = !{i32 8, !"sign-return-address-all", i32 0} +!3 = !{i32 8, !"sign-return-address-with-bkey", i32 0} diff --git a/llvm/test/LTO/AArch64/TestInputs/foo.ll b/llvm/test/LTO/AArch64/TestInputs/foo.ll new file mode 100644 index 0000000..689d938 --- /dev/null +++ b/llvm/test/LTO/AArch64/TestInputs/foo.ll @@ -0,0 +1,38 @@ +;; This file contains the previous semantic of the branch-target-enforcement, sign-return-address. +;; Used for test mixing a mixed link case and also verify the import too in llc. 
+ +; RUN: llc -mattr=+pauth -mattr=+bti %s -o - | FileCheck %s + +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-unknown-linux-gnu" + +define i32 @foo_on() #0 { +entry: + ret i32 42 +} + +; CHECK-LABEL: foo_on: +; CHECK: pacibsp +; CHECK: mov +; CHECK: retab + +define i32 @foo_off() #1 { +entry: + ret i32 43 +} + +; CHECK-LABEL: foo_off: +; CHECK-NOT: pac +; CHECK-NOT: hint +; CHECK-NOT: bti +; CHECK: ret + +attributes #0 = { noinline nounwind optnone uwtable } +attributes #1 = { noinline nounwind optnone uwtable "branch-target-enforcement"="false" "sign-return-address"="none" } + +!llvm.module.flags = !{!0, !1, !2, !3} + +!0 = !{i32 8, !"branch-target-enforcement", i32 1} +!1 = !{i32 8, !"sign-return-address", i32 1} +!2 = !{i32 8, !"sign-return-address-all", i32 1} +!3 = !{i32 8, !"sign-return-address-with-bkey", i32 1} diff --git a/llvm/test/LTO/AArch64/TestInputs/old.ll b/llvm/test/LTO/AArch64/TestInputs/old.ll new file mode 100644 index 0000000..2b1758b --- /dev/null +++ b/llvm/test/LTO/AArch64/TestInputs/old.ll @@ -0,0 +1,59 @@ +;; This file contains the previous semantic of the branch-target-enforcement, sign-return-address. +;; Used for test mixing a mixed link case and also verify the import too in llc. + +; RUN: llc -mattr=+pauth -mattr=+bti %s -o - | FileCheck %s + +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-unknown-linux-gnu" + +define i32 @old_bti() #0 { +entry: + ret i32 2 +} + +; CHECK-LABEL: old_bti: +; CHECK: bti c +; CHECK: mov +; CHECK: ret + +define i32 @old_pac() #1 { +entry: + ret i32 2 +} + +; CHECK-LABEL: old_pac: +; CHECK: paciasp +; CHECK: mov +; CHECK: retaa + + +define i32 @old_none() #2 { +entry: + ret i32 3 +} + +; CHECK-LABEL: old_none: +; CHECK-NOT: hint +; CHECK-NOT: paci +; CHECK-NOT: bti +; CHECK: ret + +declare i32 @func(i32) + +define i32 @old_none_leaf() #3 { +entry: + %0 = call i32 @func() + ret i32 %0 +} + +; CHECK-LABEL: old_none_leaf: +; CHECK: paciasp +; CHECK: bl func +; CHECK: retaa + +attributes #0 = { noinline nounwind optnone "branch-target-enforcement"="true" } +attributes #1 = { noinline nounwind optnone "branch-target-enforcement"="false" "sign-return-address"="all" "sign-return-address-key"="a_key" } +attributes #2 = { noinline nounwind optnone "branch-target-enforcement"="false" "sign-return-address"="none" } +attributes #3 = { noinline nounwind optnone "branch-target-enforcement"="false" "sign-return-address"="non-leaf" "sign-return-address-key"="a_key" } + +;; Intentionally no module flags diff --git a/llvm/test/LTO/AArch64/link-branch-target-enforcement.ll b/llvm/test/LTO/AArch64/link-branch-target-enforcement.ll index b3c9828..aef8907 100644 --- a/llvm/test/LTO/AArch64/link-branch-target-enforcement.ll +++ b/llvm/test/LTO/AArch64/link-branch-target-enforcement.ll @@ -1,10 +1,10 @@ -; Testcase to check that module with different branch-target-enforcement can -; be mixed. -; +;; Testcase to check that module with different branch-target-enforcement can +;; be mixed. 
+;; ; RUN: llvm-as %s -o %t1.bc -; RUN: llvm-as %p/Inputs/foo.ll -o %t2.bc +; RUN: llvm-as %p/TestInputs/foo.ll -o %t2.bc ; RUN: llvm-lto -exported-symbol main \ -; RUN: -exported-symbol foo \ +; RUN: -exported-symbol foo_on \ ; RUN: -filetype=obj \ ; RUN: %t1.bc %t2.bc \ ; RUN: -o %t1.exe 2>&1 | FileCheck --allow-empty %s @@ -14,11 +14,11 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" target triple = "aarch64-unknown-linux-gnu" -declare i32 @foo(); +declare i32 @foo_on(); define i32 @main() "sign-return-address"="non-leaf" "sign-return-address-key"="a_key" { entry: - %add = call i32 @foo() + %add = call i32 @foo_on() ret i32 %add } @@ -30,9 +30,12 @@ entry: ; CHECK-NOT: linking module flags 'branch-target-enforcement': IDs have conflicting values in ; CHECK-DUMP: <main>: +; CHECK-DUMP: paciasp +; CHECK-DUMP: str ; CHECK-DUMP: bl 0x8 <main+0x8> -; CHECK-DUMP: <foo>: +; CHECK-DUMP: <foo_on>: +; CHECK-DUMP: pacibsp -; `main` doesn't support BTI while `foo` does, so in the binary -; we should see only PAC which is supported by both. +;; `main` doesn't support BTI while `foo` does, so in the binary +;; we should see only PAC which is supported by both. ; CHECK-PROP: Properties: aarch64 feature: PAC
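; An illustrative sketch of the flag merging behind the expectation above (flag values
; chosen to mirror the TestInputs modules, not copied from this test): behavior ID 8 in
; these module flags is "Min", so when two modules are linked the smaller value wins for
; each flag. A BTI-less caller therefore drops the BTI bit from the merged module while
; return-address signing survives, and the GNU property note advertises PAC but not BTI.
;
;   module A: !{i32 8, !"branch-target-enforcement", i32 0}, !{i32 8, !"sign-return-address", i32 1}
;   module B: !{i32 8, !"branch-target-enforcement", i32 1}, !{i32 8, !"sign-return-address", i32 1}
;   merged:   branch-target-enforcement = 0, sign-return-address = 1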
\ No newline at end of file diff --git a/llvm/test/LTO/AArch64/link-sign-return-address.ll b/llvm/test/LTO/AArch64/link-sign-return-address.ll new file mode 100644 index 0000000..df6276f --- /dev/null +++ b/llvm/test/LTO/AArch64/link-sign-return-address.ll @@ -0,0 +1,127 @@ +;; Testcase to check that module with different sign return address can +;; be mixed. +; +; RUN: llvm-as %s -o %t1.bc +; RUN: llvm-as %p/TestInputs/foo.ll -o %t2.bc +; RUN: llvm-as %p/TestInputs/fiz.ll -o %t3.bc +; RUN: llvm-as %p/TestInputs/bar.ll -o %t4.bc +; RUN: llvm-as %p/TestInputs/old.ll -o %t5.bc +; RUN: llvm-lto -exported-symbol main \ +; RUN: -exported-symbol foo_on \ +; RUN: -exported-symbol foo_off \ +; RUN: -exported-symbol fiz_on \ +; RUN: -exported-symbol fiz_off \ +; RUN: -exported-symbol bar \ +; RUN: -exported-symbol baz \ +; RUN: -exported-symbol old_bti \ +; RUN: -exported-symbol old_pac \ +; RUN: -exported-symbol old_none \ +; RUN: -filetype=obj \ +; RUN: %t5.bc %t4.bc %t3.bc %t2.bc %t1.bc \ +; RUN: -o %t1.exe 2>&1 +; RUN: llvm-objdump -d %t1.exe | FileCheck --check-prefix=CHECK-DUMP %s +; RUN: llvm-readelf -n %t1.exe | FileCheck --allow-empty --check-prefix=CHECK-PROP %s + +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-unknown-linux-gnu" + +declare i32 @foo_on(); +declare i32 @foo_off(); +declare i32 @fiz_on(); +declare i32 @fiz_off(); +declare void @baz(); +declare void @bar(); +declare i32 @old_bti(); +declare i32 @old_pac(); +declare i32 @old_none(); + +define i32 @main() #0 { +entry: + call i32 @foo_on() + call i32 @foo_off() + call i32 @fiz_on() + call i32 @fiz_off() + call void @bar() + call void @baz() + call i32 @old_bti() + call i32 @old_pac() + call i32 @old_none() + ret i32 0 +} + +attributes #0 = { noinline nounwind optnone } + +!llvm.module.flags = !{!0, !1, !2, !3 } +!0 = !{i32 8, !"branch-target-enforcement", i32 0} +!1 = !{i32 8, !"sign-return-address", i32 0} +!2 = !{i32 8, !"sign-return-address-all", i32 0} +!3 = !{i32 8, !"sign-return-address-with-bkey", i32 0} + + +; CHECK-DUMP-LABEL: <old_bti>: +; CHECK-DUMP-NEXT: bti c +; CHECK-DUMP-NEXT: mov w0, #0x2 +; CHECK-DUMP-NEXT: ret + +; CHECK-DUMP-LABEL: <old_pac>: +; CHECK-DUMP-NEXT: paciasp +; CHECK-DUMP-NEXT: mov w0, #0x2 +; CHECK-DUMP-NEXT: autiasp +; CHECK-DUMP-NEXT: ret + +; CHECK-DUMP-LABEL: <old_none>: +; CHECK-DUMP-NEXT: mov w0, #0x3 +; CHECK-DUMP-NEXT: ret + +; CHECK-DUMP-LABEL: <bar>: +; CHECK-DUMP-NEXT: ret + +; CHECK-DUMP-LABEL: <baz>: +; CHECK-DUMP-NEXT: bti c +; CHECK-DUMP-NEXT: ret + +;; fiz.ll represents a module with the old style of the function attributes. +;; fiz_on shall have PAC with A-key as it requested at module level. +; CHECK-DUMP-LABEL: <fiz_on>: +; CHECK-DUMP-NEXT: paciasp +; CHECK-DUMP-NEXT: str x30, [sp, #-0x10]! +; CHECK-DUMP-NEXT: bl 0x38 <fiz_on+0x8> +; CHECK-DUMP-NEXT: mov w0, #0x2a +; CHECK-DUMP-NEXT: ldr x30, [sp], #0x10 +; CHECK-DUMP-NEXT: autiasp +; CHECK-DUMP-NEXT: ret + +;; fiz_off shall not have BTI or PAC instructions as they are disabled at function scope. +; CHECK-DUMP-LABEL: <fiz_off>: +; CHECK-DUMP-NEXT: mov w0, #0x2b +; CHECK-DUMP-NEXT: ret + +;; foo.ll represents a module with the old style of the function attributes. +;; foo_on shall have PAC with B-key as it requested at module level. +; CHECK-DUMP-LABEL: <foo_on>: +; CHECK-DUMP-NEXT: pacibsp +; CHECK-DUMP-NEXT: mov w0, #0x2a +; CHECK-DUMP-NEXT: autibsp +; CHECK-DUMP-NEXT: ret + +;; foo_off shall not have BTI or PAC instructions as they are disabled at function scope. 
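; A minimal sketch (hypothetical function names) of the attribute combinations these CHECK
; lines exercise: string attributes on a definition take precedence over the module-level
; request, so the first function is expected to get a signed prologue/epilogue while the
; second gets a plain ret with no PAC or BTI instructions.
define i32 @sketch_signed() "sign-return-address"="all" "sign-return-address-key"="a_key" {
  ret i32 42
}
define i32 @sketch_unsigned() "branch-target-enforcement"="false" "sign-return-address"="none" {
  ret i32 43
}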
+; CHECK-DUMP-LABEL: <foo_off>: +; CHECK-DUMP-NEXT: mov w0, #0x2b +; CHECK-DUMP-NEXT: ret + +; CHECK-DUMP-LABEL: <main>: +; CHECK-DUMP-NOT: paciasp +; CHECK-DUMP-NEXT: str x30, +; CHECK-DUMP-NEXT: bl +; CHECK-DUMP-NEXT: bl +; CHECK-DUMP-NEXT: bl +; CHECK-DUMP-NEXT: bl +; CHECK-DUMP-NEXT: bl +; CHECK-DUMP-NEXT: bl +; CHECK-DUMP-NEXT: bl +; CHECK-DUMP-NEXT: bl +; CHECK-DUMP-NEXT: bl + +;; `main` doesn't support PAC sign-return-address while `foo` does, so in the binary +;; we should not see anything. +; CHECK-PROP-NOT: Properties: aarch64 feature: PAC diff --git a/llvm/test/Linker/link-arm-and-thumb.ll b/llvm/test/Linker/link-arm-and-thumb.ll index a90f212..b5984bf 100644 --- a/llvm/test/Linker/link-arm-and-thumb.ll +++ b/llvm/test/Linker/link-arm-and-thumb.ll @@ -13,11 +13,11 @@ entry: ret i32 %add } -; CHECK: define i32 @main() { +; CHECK: define i32 @main() ; CHECK: define i32 @foo(i32 %a, i32 %b) [[ARM_ATTRS:#[0-9]+]] ; CHECK: define i32 @bar(i32 %a, i32 %b) [[THUMB_ATTRS:#[0-9]+]] -; CHECK: attributes [[ARM_ATTRS]] = { "target-features"="-thumb-mode" } -; CHECK: attributes [[THUMB_ATTRS]] = { "target-features"="+thumb-mode" } +; CHECK: attributes [[ARM_ATTRS]] = {{{.*}}"target-features"="-thumb-mode" } +; CHECK: attributes [[THUMB_ATTRS]] = {{{.*}}"target-features"="+thumb-mode" } ; STDERR-NOT: warning: Linking two modules of different target triples: diff --git a/llvm/test/MC/AMDGPU/literals.s b/llvm/test/MC/AMDGPU/literals.s index 78aa8f2..3faea99 100644 --- a/llvm/test/MC/AMDGPU/literals.s +++ b/llvm/test/MC/AMDGPU/literals.s @@ -20,282 +20,282 @@ //---------------------------------------------------------------------------// v_fract_f64 v[0:1], 0.5 -// SICI: v_fract_f64_e32 v[0:1], 0.5 ; encoding: [0xf0,0x7c,0x00,0x7e] -// GFX89: v_fract_f64_e32 v[0:1], 0.5 ; encoding: [0xf0,0x64,0x00,0x7e] -// GFX12XX: v_fract_f64_e32 v[0:1], 0.5 ; encoding: [0xf0,0x7c,0x00,0x7e] // GFX11: v_fract_f64_e32 v[0:1], 0.5 ; encoding: [0xf0,0x7c,0x00,0x7e] +// GFX12XX: v_fract_f64_e32 v[0:1], 0.5 ; encoding: [0xf0,0x7c,0x00,0x7e] +// GFX89: v_fract_f64_e32 v[0:1], 0.5 ; encoding: [0xf0,0x64,0x00,0x7e] +// SICI: v_fract_f64_e32 v[0:1], 0.5 ; encoding: [0xf0,0x7c,0x00,0x7e] v_sqrt_f64 v[0:1], -4.0 -// SICI: v_sqrt_f64_e32 v[0:1], -4.0 ; encoding: [0xf7,0x68,0x00,0x7e] -// GFX89: v_sqrt_f64_e32 v[0:1], -4.0 ; encoding: [0xf7,0x50,0x00,0x7e] -// GFX12XX: v_sqrt_f64_e32 v[0:1], -4.0 ; encoding: [0xf7,0x68,0x00,0x7e] // GFX11: v_sqrt_f64_e32 v[0:1], -4.0 ; encoding: [0xf7,0x68,0x00,0x7e] +// GFX12XX: v_sqrt_f64_e32 v[0:1], -4.0 ; encoding: [0xf7,0x68,0x00,0x7e] +// GFX89: v_sqrt_f64_e32 v[0:1], -4.0 ; encoding: [0xf7,0x50,0x00,0x7e] +// SICI: v_sqrt_f64_e32 v[0:1], -4.0 ; encoding: [0xf7,0x68,0x00,0x7e] v_log_clamp_f32 v1, 0.5 // NOGFX8PLUS: :[[@LINE-1]]:1: error: instruction not supported on this GPU // SICI: v_log_clamp_f32_e32 v1, 0.5 ; encoding: [0xf0,0x4c,0x02,0x7e] v_trunc_f32 v0, 0.5 -// SICI: v_trunc_f32_e32 v0, 0.5 ; encoding: [0xf0,0x42,0x00,0x7e] -// GFX89: v_trunc_f32_e32 v0, 0.5 ; encoding: [0xf0,0x38,0x00,0x7e] -// GFX12XX: v_trunc_f32_e32 v0, 0.5 ; encoding: [0xf0,0x42,0x00,0x7e] // GFX11: v_trunc_f32_e32 v0, 0.5 ; encoding: [0xf0,0x42,0x00,0x7e] +// GFX12XX: v_trunc_f32_e32 v0, 0.5 ; encoding: [0xf0,0x42,0x00,0x7e] +// GFX89: v_trunc_f32_e32 v0, 0.5 ; encoding: [0xf0,0x38,0x00,0x7e] +// SICI: v_trunc_f32_e32 v0, 0.5 ; encoding: [0xf0,0x42,0x00,0x7e] v_fract_f64 v[0:1], -1.0 -// SICI: v_fract_f64_e32 v[0:1], -1.0 ; encoding: [0xf3,0x7c,0x00,0x7e] -// GFX89: v_fract_f64_e32 v[0:1], -1.0 ; 
encoding: [0xf3,0x64,0x00,0x7e] -// GFX12XX: v_fract_f64_e32 v[0:1], -1.0 ; encoding: [0xf3,0x7c,0x00,0x7e] // GFX11: v_fract_f64_e32 v[0:1], -1.0 ; encoding: [0xf3,0x7c,0x00,0x7e] +// GFX12XX: v_fract_f64_e32 v[0:1], -1.0 ; encoding: [0xf3,0x7c,0x00,0x7e] +// GFX89: v_fract_f64_e32 v[0:1], -1.0 ; encoding: [0xf3,0x64,0x00,0x7e] +// SICI: v_fract_f64_e32 v[0:1], -1.0 ; encoding: [0xf3,0x7c,0x00,0x7e] v_trunc_f32 v0, -1.0 -// SICI: v_trunc_f32_e32 v0, -1.0 ; encoding: [0xf3,0x42,0x00,0x7e] -// GFX89: v_trunc_f32_e32 v0, -1.0 ; encoding: [0xf3,0x38,0x00,0x7e] -// GFX12XX: v_trunc_f32_e32 v0, -1.0 ; encoding: [0xf3,0x42,0x00,0x7e] // GFX11: v_trunc_f32_e32 v0, -1.0 ; encoding: [0xf3,0x42,0x00,0x7e] +// GFX12XX: v_trunc_f32_e32 v0, -1.0 ; encoding: [0xf3,0x42,0x00,0x7e] +// GFX89: v_trunc_f32_e32 v0, -1.0 ; encoding: [0xf3,0x38,0x00,0x7e] +// SICI: v_trunc_f32_e32 v0, -1.0 ; encoding: [0xf3,0x42,0x00,0x7e] v_fract_f64 v[0:1], 4.0 -// SICI: v_fract_f64_e32 v[0:1], 4.0 ; encoding: [0xf6,0x7c,0x00,0x7e] -// GFX89: v_fract_f64_e32 v[0:1], 4.0 ; encoding: [0xf6,0x64,0x00,0x7e] -// GFX12XX: v_fract_f64_e32 v[0:1], 4.0 ; encoding: [0xf6,0x7c,0x00,0x7e] // GFX11: v_fract_f64_e32 v[0:1], 4.0 ; encoding: [0xf6,0x7c,0x00,0x7e] +// GFX12XX: v_fract_f64_e32 v[0:1], 4.0 ; encoding: [0xf6,0x7c,0x00,0x7e] +// GFX89: v_fract_f64_e32 v[0:1], 4.0 ; encoding: [0xf6,0x64,0x00,0x7e] +// SICI: v_fract_f64_e32 v[0:1], 4.0 ; encoding: [0xf6,0x7c,0x00,0x7e] v_trunc_f32 v0, 4.0 -// SICI: v_trunc_f32_e32 v0, 4.0 ; encoding: [0xf6,0x42,0x00,0x7e] -// GFX89: v_trunc_f32_e32 v0, 4.0 ; encoding: [0xf6,0x38,0x00,0x7e] -// GFX12XX: v_trunc_f32_e32 v0, 4.0 ; encoding: [0xf6,0x42,0x00,0x7e] // GFX11: v_trunc_f32_e32 v0, 4.0 ; encoding: [0xf6,0x42,0x00,0x7e] +// GFX12XX: v_trunc_f32_e32 v0, 4.0 ; encoding: [0xf6,0x42,0x00,0x7e] +// GFX89: v_trunc_f32_e32 v0, 4.0 ; encoding: [0xf6,0x38,0x00,0x7e] +// SICI: v_trunc_f32_e32 v0, 4.0 ; encoding: [0xf6,0x42,0x00,0x7e] v_fract_f64 v[0:1], 0.0 -// SICI: v_fract_f64_e32 v[0:1], 0 ; encoding: [0x80,0x7c,0x00,0x7e] -// GFX89: v_fract_f64_e32 v[0:1], 0 ; encoding: [0x80,0x64,0x00,0x7e] -// GFX12XX: v_fract_f64_e32 v[0:1], 0 ; encoding: [0x80,0x7c,0x00,0x7e] // GFX11: v_fract_f64_e32 v[0:1], 0 ; encoding: [0x80,0x7c,0x00,0x7e] +// GFX12XX: v_fract_f64_e32 v[0:1], 0 ; encoding: [0x80,0x7c,0x00,0x7e] +// GFX89: v_fract_f64_e32 v[0:1], 0 ; encoding: [0x80,0x64,0x00,0x7e] +// SICI: v_fract_f64_e32 v[0:1], 0 ; encoding: [0x80,0x7c,0x00,0x7e] v_trunc_f32 v0, 0.0 -// SICI: v_trunc_f32_e32 v0, 0 ; encoding: [0x80,0x42,0x00,0x7e] -// GFX89: v_trunc_f32_e32 v0, 0 ; encoding: [0x80,0x38,0x00,0x7e] -// GFX12XX: v_trunc_f32_e32 v0, 0 ; encoding: [0x80,0x42,0x00,0x7e] // GFX11: v_trunc_f32_e32 v0, 0 ; encoding: [0x80,0x42,0x00,0x7e] +// GFX12XX: v_trunc_f32_e32 v0, 0 ; encoding: [0x80,0x42,0x00,0x7e] +// GFX89: v_trunc_f32_e32 v0, 0 ; encoding: [0x80,0x38,0x00,0x7e] +// SICI: v_trunc_f32_e32 v0, 0 ; encoding: [0x80,0x42,0x00,0x7e] v_fract_f64 v[0:1], 1.5 -// SICI: v_fract_f64_e32 v[0:1], 0x3ff80000 ; encoding: [0xff,0x7c,0x00,0x7e,0x00,0x00,0xf8,0x3f] -// GFX89: v_fract_f64_e32 v[0:1], 0x3ff80000 ; encoding: [0xff,0x64,0x00,0x7e,0x00,0x00,0xf8,0x3f] -// GFX12XX: v_fract_f64_e32 v[0:1], 0x3ff80000 ; encoding: [0xff,0x7c,0x00,0x7e,0x00,0x00,0xf8,0x3f] // GFX11: v_fract_f64_e32 v[0:1], 0x3ff80000 ; encoding: [0xff,0x7c,0x00,0x7e,0x00,0x00,0xf8,0x3f] +// GFX12XX: v_fract_f64_e32 v[0:1], 0x3ff80000 ; encoding: [0xff,0x7c,0x00,0x7e,0x00,0x00,0xf8,0x3f] +// GFX89: v_fract_f64_e32 v[0:1], 0x3ff80000 ; encoding: 
[0xff,0x64,0x00,0x7e,0x00,0x00,0xf8,0x3f] +// SICI: v_fract_f64_e32 v[0:1], 0x3ff80000 ; encoding: [0xff,0x7c,0x00,0x7e,0x00,0x00,0xf8,0x3f] v_trunc_f32 v0, 1.5 -// SICI: v_trunc_f32_e32 v0, 0x3fc00000 ; encoding: [0xff,0x42,0x00,0x7e,0x00,0x00,0xc0,0x3f] -// GFX89: v_trunc_f32_e32 v0, 0x3fc00000 ; encoding: [0xff,0x38,0x00,0x7e,0x00,0x00,0xc0,0x3f] -// GFX12XX: v_trunc_f32_e32 v0, 0x3fc00000 ; encoding: [0xff,0x42,0x00,0x7e,0x00,0x00,0xc0,0x3f] // GFX11: v_trunc_f32_e32 v0, 0x3fc00000 ; encoding: [0xff,0x42,0x00,0x7e,0x00,0x00,0xc0,0x3f] +// GFX12XX: v_trunc_f32_e32 v0, 0x3fc00000 ; encoding: [0xff,0x42,0x00,0x7e,0x00,0x00,0xc0,0x3f] +// GFX89: v_trunc_f32_e32 v0, 0x3fc00000 ; encoding: [0xff,0x38,0x00,0x7e,0x00,0x00,0xc0,0x3f] +// SICI: v_trunc_f32_e32 v0, 0x3fc00000 ; encoding: [0xff,0x42,0x00,0x7e,0x00,0x00,0xc0,0x3f] v_fract_f64 v[0:1], -3.1415 -// SICI: v_fract_f64_e32 v[0:1], 0xc00921ca ; encoding: [0xff,0x7c,0x00,0x7e,0xca,0x21,0x09,0xc0] -// GFX89: v_fract_f64_e32 v[0:1], 0xc00921ca ; encoding: [0xff,0x64,0x00,0x7e,0xca,0x21,0x09,0xc0] -// NOSICI: :[[@LINE-3]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero -// NOGFX89: :[[@LINE-4]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero // GFX11: v_fract_f64_e32 v[0:1], 0xc00921ca ; encoding: [0xff,0x7c,0x00,0x7e,0xca,0x21,0x09,0xc0] // GFX12: v_fract_f64_e32 v[0:1], 0xc00921ca ; encoding: [0xff,0x7c,0x00,0x7e,0xca,0x21,0x09,0xc0] // GFX1250: v_fract_f64_e32 v[0:1], 0xc00921cac083126f ; encoding: [0xfe,0x7c,0x00,0x7e,0x6f,0x12,0x83,0xc0,0xca,0x21,0x09,0xc0] -// NOGFX11: :[[@LINE-8]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero -// NOGFX12: :[[@LINE-9]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// GFX89: v_fract_f64_e32 v[0:1], 0xc00921ca ; encoding: [0xff,0x64,0x00,0x7e,0xca,0x21,0x09,0xc0] +// NOGFX11: :[[@LINE-5]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// NOGFX12: :[[@LINE-6]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// NOGFX89: :[[@LINE-7]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// NOSICI: :[[@LINE-8]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// SICI: v_fract_f64_e32 v[0:1], 0xc00921ca ; encoding: [0xff,0x7c,0x00,0x7e,0xca,0x21,0x09,0xc0] // NOSICIVI: :[[@LINE-3]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. 
Low 32-bits will be set to zero v_trunc_f32 v0, -3.1415 -// SICI: v_trunc_f32_e32 v0, 0xc0490e56 ; encoding: [0xff,0x42,0x00,0x7e,0x56,0x0e,0x49,0xc0] -// GFX89: v_trunc_f32_e32 v0, 0xc0490e56 ; encoding: [0xff,0x38,0x00,0x7e,0x56,0x0e,0x49,0xc0] -// GFX12XX: v_trunc_f32_e32 v0, 0xc0490e56 ; encoding: [0xff,0x42,0x00,0x7e,0x56,0x0e,0x49,0xc0] // GFX11: v_trunc_f32_e32 v0, 0xc0490e56 ; encoding: [0xff,0x42,0x00,0x7e,0x56,0x0e,0x49,0xc0] +// GFX12XX: v_trunc_f32_e32 v0, 0xc0490e56 ; encoding: [0xff,0x42,0x00,0x7e,0x56,0x0e,0x49,0xc0] +// GFX89: v_trunc_f32_e32 v0, 0xc0490e56 ; encoding: [0xff,0x38,0x00,0x7e,0x56,0x0e,0x49,0xc0] +// SICI: v_trunc_f32_e32 v0, 0xc0490e56 ; encoding: [0xff,0x42,0x00,0x7e,0x56,0x0e,0x49,0xc0] v_fract_f64 v[0:1], 100000000000000000000000.0 -// SICI: v_fract_f64_e32 v[0:1], 0x44b52d02 ; encoding: [0xff,0x7c,0x00,0x7e,0x02,0x2d,0xb5,0x44] -// GFX89: v_fract_f64_e32 v[0:1], 0x44b52d02 ; encoding: [0xff,0x64,0x00,0x7e,0x02,0x2d,0xb5,0x44] -// NOSICI: :[[@LINE-3]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero -// NOGFX89: :[[@LINE-4]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero // GFX11: v_fract_f64_e32 v[0:1], 0x44b52d02 ; encoding: [0xff,0x7c,0x00,0x7e,0x02,0x2d,0xb5,0x44] // GFX12: v_fract_f64_e32 v[0:1], 0x44b52d02 ; encoding: [0xff,0x7c,0x00,0x7e,0x02,0x2d,0xb5,0x44] // GFX1250: v_fract_f64_e32 v[0:1], 0x44b52d02c7e14af6 ; encoding: [0xfe,0x7c,0x00,0x7e,0xf6,0x4a,0xe1,0xc7,0x02,0x2d,0xb5,0x44] -// NOGFX11: :[[@LINE-8]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero -// NOGFX12: :[[@LINE-9]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// GFX89: v_fract_f64_e32 v[0:1], 0x44b52d02 ; encoding: [0xff,0x64,0x00,0x7e,0x02,0x2d,0xb5,0x44] +// NOGFX11: :[[@LINE-5]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// NOGFX12: :[[@LINE-6]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// NOGFX89: :[[@LINE-7]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// NOSICI: :[[@LINE-8]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// SICI: v_fract_f64_e32 v[0:1], 0x44b52d02 ; encoding: [0xff,0x7c,0x00,0x7e,0x02,0x2d,0xb5,0x44] // NOSICIVI: :[[@LINE-3]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. 
Low 32-bits will be set to zero v_trunc_f32 v0, 100000000000000000000000.0 -// SICI: v_trunc_f32_e32 v0, 0x65a96816 ; encoding: [0xff,0x42,0x00,0x7e,0x16,0x68,0xa9,0x65] -// GFX89: v_trunc_f32_e32 v0, 0x65a96816 ; encoding: [0xff,0x38,0x00,0x7e,0x16,0x68,0xa9,0x65] -// GFX12XX: v_trunc_f32_e32 v0, 0x65a96816 ; encoding: [0xff,0x42,0x00,0x7e,0x16,0x68,0xa9,0x65] // GFX11: v_trunc_f32_e32 v0, 0x65a96816 ; encoding: [0xff,0x42,0x00,0x7e,0x16,0x68,0xa9,0x65] +// GFX12XX: v_trunc_f32_e32 v0, 0x65a96816 ; encoding: [0xff,0x42,0x00,0x7e,0x16,0x68,0xa9,0x65] +// GFX89: v_trunc_f32_e32 v0, 0x65a96816 ; encoding: [0xff,0x38,0x00,0x7e,0x16,0x68,0xa9,0x65] +// SICI: v_trunc_f32_e32 v0, 0x65a96816 ; encoding: [0xff,0x42,0x00,0x7e,0x16,0x68,0xa9,0x65] v_fract_f64 v[0:1], 10000000.0 -// SICI: v_fract_f64_e32 v[0:1], 0x416312d0 ; encoding: [0xff,0x7c,0x00,0x7e,0xd0,0x12,0x63,0x41] -// GFX89: v_fract_f64_e32 v[0:1], 0x416312d0 ; encoding: [0xff,0x64,0x00,0x7e,0xd0,0x12,0x63,0x41] -// GFX12XX: v_fract_f64_e32 v[0:1], 0x416312d0 ; encoding: [0xff,0x7c,0x00,0x7e,0xd0,0x12,0x63,0x41] // GFX11: v_fract_f64_e32 v[0:1], 0x416312d0 ; encoding: [0xff,0x7c,0x00,0x7e,0xd0,0x12,0x63,0x41] +// GFX12XX: v_fract_f64_e32 v[0:1], 0x416312d0 ; encoding: [0xff,0x7c,0x00,0x7e,0xd0,0x12,0x63,0x41] +// GFX89: v_fract_f64_e32 v[0:1], 0x416312d0 ; encoding: [0xff,0x64,0x00,0x7e,0xd0,0x12,0x63,0x41] +// SICI: v_fract_f64_e32 v[0:1], 0x416312d0 ; encoding: [0xff,0x7c,0x00,0x7e,0xd0,0x12,0x63,0x41] v_trunc_f32 v0, 10000000.0 -// SICI: v_trunc_f32_e32 v0, 0x4b189680 ; encoding: [0xff,0x42,0x00,0x7e,0x80,0x96,0x18,0x4b] -// GFX89: v_trunc_f32_e32 v0, 0x4b189680 ; encoding: [0xff,0x38,0x00,0x7e,0x80,0x96,0x18,0x4b] -// GFX12XX: v_trunc_f32_e32 v0, 0x4b189680 ; encoding: [0xff,0x42,0x00,0x7e,0x80,0x96,0x18,0x4b] // GFX11: v_trunc_f32_e32 v0, 0x4b189680 ; encoding: [0xff,0x42,0x00,0x7e,0x80,0x96,0x18,0x4b] +// GFX12XX: v_trunc_f32_e32 v0, 0x4b189680 ; encoding: [0xff,0x42,0x00,0x7e,0x80,0x96,0x18,0x4b] +// GFX89: v_trunc_f32_e32 v0, 0x4b189680 ; encoding: [0xff,0x38,0x00,0x7e,0x80,0x96,0x18,0x4b] +// SICI: v_trunc_f32_e32 v0, 0x4b189680 ; encoding: [0xff,0x42,0x00,0x7e,0x80,0x96,0x18,0x4b] v_fract_f64 v[0:1], 3.402823e+38 -// SICI: v_fract_f64_e32 v[0:1], 0x47efffff ; encoding: [0xff,0x7c,0x00,0x7e,0xff,0xff,0xef,0x47] -// GFX89: v_fract_f64_e32 v[0:1], 0x47efffff ; encoding: [0xff,0x64,0x00,0x7e,0xff,0xff,0xef,0x47] -// NOSICI: :[[@LINE-3]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero -// NOGFX89: :[[@LINE-4]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero // GFX11: v_fract_f64_e32 v[0:1], 0x47efffff ; encoding: [0xff,0x7c,0x00,0x7e,0xff,0xff,0xef,0x47] // GFX12: v_fract_f64_e32 v[0:1], 0x47efffff ; encoding: [0xff,0x7c,0x00,0x7e,0xff,0xff,0xef,0x47] // GFX1250: v_fract_f64_e32 v[0:1], 0x47efffff966ad924 ; encoding: [0xfe,0x7c,0x00,0x7e,0x24,0xd9,0x6a,0x96,0xff,0xff,0xef,0x47] -// NOGFX11: :[[@LINE-8]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero -// NOGFX12: :[[@LINE-9]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// GFX89: v_fract_f64_e32 v[0:1], 0x47efffff ; encoding: [0xff,0x64,0x00,0x7e,0xff,0xff,0xef,0x47] +// NOGFX11: :[[@LINE-5]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. 
Low 32-bits will be set to zero +// NOGFX12: :[[@LINE-6]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// NOGFX89: :[[@LINE-7]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// NOSICI: :[[@LINE-8]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// SICI: v_fract_f64_e32 v[0:1], 0x47efffff ; encoding: [0xff,0x7c,0x00,0x7e,0xff,0xff,0xef,0x47] // NOSICIVI: :[[@LINE-3]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero v_trunc_f32 v0, 3.402823e+38 -// SICI: v_trunc_f32_e32 v0, 0x7f7ffffd ; encoding: [0xff,0x42,0x00,0x7e,0xfd,0xff,0x7f,0x7f] -// GFX89: v_trunc_f32_e32 v0, 0x7f7ffffd ; encoding: [0xff,0x38,0x00,0x7e,0xfd,0xff,0x7f,0x7f] -// GFX12XX: v_trunc_f32_e32 v0, 0x7f7ffffd ; encoding: [0xff,0x42,0x00,0x7e,0xfd,0xff,0x7f,0x7f] // GFX11: v_trunc_f32_e32 v0, 0x7f7ffffd ; encoding: [0xff,0x42,0x00,0x7e,0xfd,0xff,0x7f,0x7f] +// GFX12XX: v_trunc_f32_e32 v0, 0x7f7ffffd ; encoding: [0xff,0x42,0x00,0x7e,0xfd,0xff,0x7f,0x7f] +// GFX89: v_trunc_f32_e32 v0, 0x7f7ffffd ; encoding: [0xff,0x38,0x00,0x7e,0xfd,0xff,0x7f,0x7f] +// SICI: v_trunc_f32_e32 v0, 0x7f7ffffd ; encoding: [0xff,0x42,0x00,0x7e,0xfd,0xff,0x7f,0x7f] v_fract_f64 v[0:1], 2.3509886e-38 -// SICI: v_fract_f64_e32 v[0:1], 0x381fffff ; encoding: [0xff,0x7c,0x00,0x7e,0xff,0xff,0x1f,0x38] -// GFX89: v_fract_f64_e32 v[0:1], 0x381fffff ; encoding: [0xff,0x64,0x00,0x7e,0xff,0xff,0x1f,0x38] -// NOSICI: :[[@LINE-3]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero -// NOGFX89: :[[@LINE-4]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero // GFX11: v_fract_f64_e32 v[0:1], 0x381fffff ; encoding: [0xff,0x7c,0x00,0x7e,0xff,0xff,0x1f,0x38] // GFX12: v_fract_f64_e32 v[0:1], 0x381fffff ; encoding: [0xff,0x7c,0x00,0x7e,0xff,0xff,0x1f,0x38] // GFX1250: v_fract_f64_e32 v[0:1], 0x381fffffe8c9d9fb ; encoding: [0xfe,0x7c,0x00,0x7e,0xfb,0xd9,0xc9,0xe8,0xff,0xff,0x1f,0x38] -// NOGFX11: :[[@LINE-8]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero -// NOGFX12: :[[@LINE-9]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// GFX89: v_fract_f64_e32 v[0:1], 0x381fffff ; encoding: [0xff,0x64,0x00,0x7e,0xff,0xff,0x1f,0x38] +// NOGFX11: :[[@LINE-5]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// NOGFX12: :[[@LINE-6]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// NOGFX89: :[[@LINE-7]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// NOSICI: :[[@LINE-8]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// SICI: v_fract_f64_e32 v[0:1], 0x381fffff ; encoding: [0xff,0x7c,0x00,0x7e,0xff,0xff,0x1f,0x38] // NOSICIVI: :[[@LINE-3]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. 
Low 32-bits will be set to zero v_trunc_f32 v0, 2.3509886e-38 -// SICI: v_trunc_f32_e32 v0, 0xffffff ; encoding: [0xff,0x42,0x00,0x7e,0xff,0xff,0xff,0x00] -// GFX89: v_trunc_f32_e32 v0, 0xffffff ; encoding: [0xff,0x38,0x00,0x7e,0xff,0xff,0xff,0x00] -// GFX12XX: v_trunc_f32_e32 v0, 0xffffff ; encoding: [0xff,0x42,0x00,0x7e,0xff,0xff,0xff,0x00] // GFX11: v_trunc_f32_e32 v0, 0xffffff ; encoding: [0xff,0x42,0x00,0x7e,0xff,0xff,0xff,0x00] +// GFX12XX: v_trunc_f32_e32 v0, 0xffffff ; encoding: [0xff,0x42,0x00,0x7e,0xff,0xff,0xff,0x00] +// GFX89: v_trunc_f32_e32 v0, 0xffffff ; encoding: [0xff,0x38,0x00,0x7e,0xff,0xff,0xff,0x00] +// SICI: v_trunc_f32_e32 v0, 0xffffff ; encoding: [0xff,0x42,0x00,0x7e,0xff,0xff,0xff,0x00] v_fract_f64 v[0:1], 2.3509886e-70 -// SICI: v_fract_f64_e32 v[0:1], 0x3179f623 ; encoding: [0xff,0x7c,0x00,0x7e,0x23,0xf6,0x79,0x31] -// GFX89: v_fract_f64_e32 v[0:1], 0x3179f623 ; encoding: [0xff,0x64,0x00,0x7e,0x23,0xf6,0x79,0x31] -// NOSICI: :[[@LINE-3]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero -// NOGFX89: :[[@LINE-4]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero // GFX11: v_fract_f64_e32 v[0:1], 0x3179f623 ; encoding: [0xff,0x7c,0x00,0x7e,0x23,0xf6,0x79,0x31] // GFX12: v_fract_f64_e32 v[0:1], 0x3179f623 ; encoding: [0xff,0x7c,0x00,0x7e,0x23,0xf6,0x79,0x31] // GFX1250: v_fract_f64_e32 v[0:1], 0x3179f623c2d3cf3c ; encoding: [0xfe,0x7c,0x00,0x7e,0x3c,0xcf,0xd3,0xc2,0x23,0xf6,0x79,0x31] -// NOGFX11: :[[@LINE-8]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero -// NOGFX12: :[[@LINE-9]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// GFX89: v_fract_f64_e32 v[0:1], 0x3179f623 ; encoding: [0xff,0x64,0x00,0x7e,0x23,0xf6,0x79,0x31] +// NOGFX11: :[[@LINE-5]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// NOGFX12: :[[@LINE-6]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// NOGFX89: :[[@LINE-7]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// NOSICI: :[[@LINE-8]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero +// SICI: v_fract_f64_e32 v[0:1], 0x3179f623 ; encoding: [0xff,0x7c,0x00,0x7e,0x23,0xf6,0x79,0x31] // NOSICIVI: :[[@LINE-3]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. 
Low 32-bits will be set to zero v_trunc_f32 v0, 2.3509886e-70 // NOGCN: :[[@LINE-1]]:17: error: invalid operand for instruction v_fract_f64_e32 v[0:1], 1.0 -// SICI: v_fract_f64_e32 v[0:1], 1.0 ; encoding: [0xf2,0x7c,0x00,0x7e] -// GFX89: v_fract_f64_e32 v[0:1], 1.0 ; encoding: [0xf2,0x64,0x00,0x7e] -// GFX12XX: v_fract_f64_e32 v[0:1], 1.0 ; encoding: [0xf2,0x7c,0x00,0x7e] // GFX11: v_fract_f64_e32 v[0:1], 1.0 ; encoding: [0xf2,0x7c,0x00,0x7e] +// GFX12XX: v_fract_f64_e32 v[0:1], 1.0 ; encoding: [0xf2,0x7c,0x00,0x7e] +// GFX89: v_fract_f64_e32 v[0:1], 1.0 ; encoding: [0xf2,0x64,0x00,0x7e] +// SICI: v_fract_f64_e32 v[0:1], 1.0 ; encoding: [0xf2,0x7c,0x00,0x7e] v_fract_f64_e32 v[0:1], lit(1.0) -// SICI: v_fract_f64_e32 v[0:1], lit(0x3ff00000) ; encoding: [0xff,0x7c,0x00,0x7e,0x00,0x00,0xf0,0x3f] -// GFX89: v_fract_f64_e32 v[0:1], lit(0x3ff00000) ; encoding: [0xff,0x64,0x00,0x7e,0x00,0x00,0xf0,0x3f] // GFX11: v_fract_f64_e32 v[0:1], lit(0x3ff00000) ; encoding: [0xff,0x7c,0x00,0x7e,0x00,0x00,0xf0,0x3f] // GFX12: v_fract_f64_e32 v[0:1], lit(0x3ff00000) ; encoding: [0xff,0x7c,0x00,0x7e,0x00,0x00,0xf0,0x3f] // GFX1250: v_fract_f64_e32 v[0:1], lit(0x3ff00000) ; encoding: [0xfe,0x7c,0x00,0x7e,0x00,0x00,0xf0,0x3f,0x00,0x00,0x00,0x00] +// GFX89: v_fract_f64_e32 v[0:1], lit(0x3ff00000) ; encoding: [0xff,0x64,0x00,0x7e,0x00,0x00,0xf0,0x3f] +// SICI: v_fract_f64_e32 v[0:1], lit(0x3ff00000) ; encoding: [0xff,0x7c,0x00,0x7e,0x00,0x00,0xf0,0x3f] v_wmma_i32_16x16x16_iu8 v[8:15], v[0:3], v[4:7], 1.0 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX11: v_wmma_i32_16x16x16_iu8 v[8:15], v[0:3], v[4:7], 1.0 ; encoding: [0x08,0x40,0x44,0xcc,0x00,0x09,0xca,0x1b] -// NOGFX12: :[[@LINE-4]]:1: error: operands are not valid for this GPU or mode -// NOGFX1250: :[[@LINE-5]]:1: error: operands are not valid for this GPU or mode +// NOGFX12: :[[@LINE-2]]:1: error: operands are not valid for this GPU or mode +// NOGFX1250: :[[@LINE-3]]:1: error: operands are not valid for this GPU or mode +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_wmma_i32_16x16x16_iu8 v[8:15], v[0:3], v[4:7], lit(1.0) -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU -// NOGFX11: :[[@LINE-3]]:54: error: invalid operand for instruction -// NOGFX12: :[[@LINE-4]]:54: error: invalid operand for instruction -// NOGFX1250: :[[@LINE-5]]:54: error: invalid operand for instruction +// NOGFX11: :[[@LINE-1]]:54: error: invalid operand for instruction +// NOGFX12: :[[@LINE-2]]:54: error: invalid operand for instruction +// NOGFX1250: :[[@LINE-3]]:54: error: invalid operand for instruction +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_cos_f16_e32 v5.l, 1.0 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: operands are not valid for this GPU or mode // GFX11: v_cos_f16_e32 v5.l, 1.0 ; encoding: [0xf2,0xc2,0x0a,0x7e] // GFX1250: v_cos_f16_e32 v5.l, 1.0 ; encoding: [0xf2,0xc2,0x0a,0x7e] -// NOGFX12: :[[@LINE-5]]:1: error: operands are not valid for this GPU or mode +// NOGFX12: :[[@LINE-3]]:1: error: operands are not valid for this GPU or mode +// NOGFX89: :[[@LINE-4]]:1: error: operands are not valid for this 
GPU or mode
+// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU
v_cos_f16_e32 v5.l, lit(1.0)
-// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU
-// NOGFX89: :[[@LINE-2]]:1: error: operands are not valid for this GPU or mode
// GFX11: v_cos_f16_e32 v5.l, lit(0x3c00) ; encoding: [0xff,0xc2,0x0a,0x7e,0x00,0x3c,0x00,0x00]
// GFX1250: v_cos_f16_e32 v5.l, lit(0x3c00) ; encoding: [0xff,0xc2,0x0a,0x7e,0x00,0x3c,0x00,0x00]
-// NOGFX12: :[[@LINE-5]]:1: error: operands are not valid for this GPU or mode
+// NOGFX12: :[[@LINE-3]]:1: error: operands are not valid for this GPU or mode
+// NOGFX89: :[[@LINE-4]]:1: error: operands are not valid for this GPU or mode
+// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU
v_tanh_bf16 v5, 1.0
-// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU
-// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU
// GFX1250: v_tanh_bf16_e32 v5, 1.0 ; encoding: [0xf2,0x94,0x0a,0x7e]
-// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU
-// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU
+// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU
+// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU
v_tanh_bf16 v5, lit(1.0)
-// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU
-// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU
// GFX1250: v_tanh_bf16_e32 v5, lit(0x3f80) ; encoding: [0xff,0x94,0x0a,0x7e,0x80,0x3f,0x00,0x00]
-// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU
-// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU
+// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU
+// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU
v_trunc_f32_e32 v0, 1.0
-// SICI: v_trunc_f32_e32 v0, 1.0 ; encoding: [0xf2,0x42,0x00,0x7e]
-// GFX89: v_trunc_f32_e32 v0, 1.0 ; encoding: [0xf2,0x38,0x00,0x7e]
-// GFX12XX: v_trunc_f32_e32 v0, 1.0 ; encoding: [0xf2,0x42,0x00,0x7e]
// GFX11: v_trunc_f32_e32 v0, 1.0 ; encoding: [0xf2,0x42,0x00,0x7e]
+// GFX12XX: v_trunc_f32_e32 v0, 1.0 ; encoding: [0xf2,0x42,0x00,0x7e]
+// GFX89: v_trunc_f32_e32 v0, 1.0 ; encoding: [0xf2,0x38,0x00,0x7e]
+// SICI: v_trunc_f32_e32 v0, 1.0 ; encoding: [0xf2,0x42,0x00,0x7e]
v_trunc_f32_e32 v0, lit(1.0)
-// SICI: v_trunc_f32_e32 v0, lit(0x3f800000) ; encoding: [0xff,0x42,0x00,0x7e,0x00,0x00,0x80,0x3f]
-// GFX89: v_trunc_f32_e32 v0, lit(0x3f800000) ; encoding: [0xff,0x38,0x00,0x7e,0x00,0x00,0x80,0x3f]
-// GFX12XX: v_trunc_f32_e32 v0, lit(0x3f800000) ; encoding: [0xff,0x42,0x00,0x7e,0x00,0x00,0x80,0x3f]
// GFX11: v_trunc_f32_e32 v0, lit(0x3f800000) ; encoding: [0xff,0x42,0x00,0x7e,0x00,0x00,0x80,0x3f]
+// GFX12XX: v_trunc_f32_e32 v0, lit(0x3f800000) ; encoding: [0xff,0x42,0x00,0x7e,0x00,0x00,0x80,0x3f]
+// GFX89: v_trunc_f32_e32 v0, lit(0x3f800000) ; encoding: [0xff,0x38,0x00,0x7e,0x00,0x00,0x80,0x3f]
+// SICI: v_trunc_f32_e32 v0, lit(0x3f800000) ; encoding: [0xff,0x42,0x00,0x7e,0x00,0x00,0x80,0x3f]
v_dot2_bf16_bf16 v5.l, v1, v2, 1.0
-// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU
-// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU
// GFX11: v_dot2_bf16_bf16 v5.l, v1, v2, 1.0 ; encoding: [0x05,0x00,0x67,0xd6,0x01,0x05,0xca,0x03]
-// NOGFX12: :[[@LINE-4]]:1: error: operands are not valid for this GPU or mode
-// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU
+// NOGFX12: :[[@LINE-2]]:1: error: operands are not valid for this GPU or mode
+// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU
+// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU
v_dot2_bf16_bf16 v5.l, v1, v2, lit(1.0)
-// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU
-// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU
// GFX11: v_dot2_bf16_bf16 v5.l, v1, v2, lit(0x3f80) ; encoding: [0x05,0x00,0x67,0xd6,0x01,0x05,0xfe,0x03,0x80,0x3f,0x00,0x00]
-// NOGFX12: :[[@LINE-4]]:1: error: operands are not valid for this GPU or mode
-// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU
+// NOGFX12: :[[@LINE-2]]:1: error: operands are not valid for this GPU or mode
+// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU
+// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU
v_dot2_f32_f16 v5, v1, 1.0, v2
-// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU
-// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU
// GFX11: v_dot2_f32_f16 v5, v1, 1.0, v2 ; encoding: [0x05,0x40,0x13,0xcc,0x01,0xe5,0x09,0x1c]
// GFX12: v_dot2_f32_f16 v5, v1, 1.0, v2 ; encoding: [0x05,0x40,0x13,0xcc,0x01,0xe5,0x09,0x1c]
-// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU
+// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU
+// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU
v_dot2_f32_f16 v5, v1, lit(1.0), v2
-// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU
-// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU
// GFX11: v_dot2_f32_f16 v5, v1, lit(0x3c00), v2 ; encoding: [0x05,0x40,0x13,0xcc,0x01,0xff,0x09,0x1c,0x00,0x3c,0x00,0x00]
// GFX12: v_dot2_f32_f16 v5, v1, lit(0x3c00), v2 ; encoding: [0x05,0x40,0x13,0xcc,0x01,0xff,0x09,0x1c,0x00,0x3c,0x00,0x00]
-// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU
+// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU
+// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU
v_cvt_pk_fp8_f16 v1.l, 1.0
-// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU
-// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU
// GFX1250: v_cvt_pk_fp8_f16 v1.l, 0x3c00 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x3c,0x00,0x00]
-// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU
-// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU
+// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU
+// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU
v_cvt_pk_fp8_f16 v1.l, lit(1.0)
-// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU
-// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU
// GFX1250: v_cvt_pk_fp8_f16 v1.l, lit(0x3c00) ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x3c,0x00,0x00]
-// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU
-// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU
+// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU
+// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU
//---------------------------------------------------------------------------//
// fp literal, expected int operand
@@ -309,118 +309,118 @@ s_mov_b64 s[0:1], lit(0.5)
// NOGCN: :[[@LINE-1]]:23: error: invalid operand for instruction
v_and_b32_e32 v0, 0.5, v1
-// SICI: v_and_b32_e32 v0, 0.5, v1 ; encoding: [0xf0,0x02,0x00,0x36]
-// GFX89: v_and_b32_e32 v0, 0.5, v1 ; encoding: [0xf0,0x02,0x00,0x26]
-// GFX12XX: v_and_b32_e32 v0, 0.5, v1 ; encoding: [0xf0,0x02,0x00,0x36]
// GFX11: v_and_b32_e32 v0, 0.5, v1 ; encoding: [0xf0,0x02,0x00,0x36]
+// GFX12XX: v_and_b32_e32 v0, 0.5, v1 ; encoding: [0xf0,0x02,0x00,0x36]
+// GFX89: v_and_b32_e32 v0, 0.5, v1 ; encoding: [0xf0,0x02,0x00,0x26]
+// SICI: v_and_b32_e32 v0, 0.5, v1 ; encoding: [0xf0,0x02,0x00,0x36]
v_and_b32_e64 v0, 0.5, v1
-// SICI: v_and_b32_e64 v0, 0.5, v1 ; encoding: [0x00,0x00,0x36,0xd2,0xf0,0x02,0x02,0x00]
-// GFX89: v_and_b32_e64 v0, 0.5, v1 ; encoding: [0x00,0x00,0x13,0xd1,0xf0,0x02,0x02,0x00]
-// GFX12XX: v_and_b32_e64 v0, 0.5, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xf0,0x02,0x02,0x00]
// GFX11: v_and_b32_e64 v0, 0.5, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xf0,0x02,0x02,0x00]
+// GFX12XX: v_and_b32_e64 v0, 0.5, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xf0,0x02,0x02,0x00]
+// GFX89: v_and_b32_e64 v0, 0.5, v1 ; encoding: [0x00,0x00,0x13,0xd1,0xf0,0x02,0x02,0x00]
+// SICI: v_and_b32_e64 v0, 0.5, v1 ; encoding: [0x00,0x00,0x36,0xd2,0xf0,0x02,0x02,0x00]
s_mov_b64_e32 s[0:1], -1.0
// GFX8PLUS: s_mov_b64 s[0:1], -1.0 ; encoding: [0xf3,0x01,0x80,0xbe]
// SICI: s_mov_b64 s[0:1], -1.0 ; encoding: [0xf3,0x04,0x80,0xbe]
v_and_b32_e32 v0, -1.0, v1
-// SICI: v_and_b32_e32 v0, -1.0, v1 ; encoding: [0xf3,0x02,0x00,0x36]
-// GFX89: v_and_b32_e32 v0, -1.0, v1 ; encoding: [0xf3,0x02,0x00,0x26]
-// GFX12XX: v_and_b32_e32 v0, -1.0, v1 ; encoding: [0xf3,0x02,0x00,0x36]
// GFX11: v_and_b32_e32 v0, -1.0, v1 ; encoding: [0xf3,0x02,0x00,0x36]
+// GFX12XX: v_and_b32_e32 v0, -1.0, v1 ; encoding: [0xf3,0x02,0x00,0x36]
+// GFX89: v_and_b32_e32 v0, -1.0, v1 ; encoding: [0xf3,0x02,0x00,0x26]
+// SICI: v_and_b32_e32 v0, -1.0, v1 ; encoding: [0xf3,0x02,0x00,0x36]
v_and_b32_e64 v0, -1.0, v1
-// SICI: v_and_b32_e64 v0, -1.0, v1 ; encoding: [0x00,0x00,0x36,0xd2,0xf3,0x02,0x02,0x00]
-// GFX89: v_and_b32_e64 v0, -1.0, v1 ; encoding: [0x00,0x00,0x13,0xd1,0xf3,0x02,0x02,0x00]
-// GFX12XX: v_and_b32_e64 v0, -1.0, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xf3,0x02,0x02,0x00]
// GFX11: v_and_b32_e64 v0, -1.0, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xf3,0x02,0x02,0x00]
+// GFX12XX: v_and_b32_e64 v0, -1.0, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xf3,0x02,0x02,0x00]
+// GFX89: v_and_b32_e64 v0, -1.0, v1 ; encoding: [0x00,0x00,0x13,0xd1,0xf3,0x02,0x02,0x00]
+// SICI: v_and_b32_e64 v0, -1.0, v1 ; encoding: [0x00,0x00,0x36,0xd2,0xf3,0x02,0x02,0x00]
s_mov_b64_e32 s[0:1], 4.0
// GFX8PLUS: s_mov_b64 s[0:1], 4.0 ; encoding: [0xf6,0x01,0x80,0xbe]
// SICI: s_mov_b64 s[0:1], 4.0 ; encoding: [0xf6,0x04,0x80,0xbe]
v_and_b32_e32 v0, 4.0, v1
-// SICI: v_and_b32_e32 v0, 4.0, v1 ; encoding: [0xf6,0x02,0x00,0x36]
-// GFX89: v_and_b32_e32 v0, 4.0, v1 ; encoding: [0xf6,0x02,0x00,0x26]
-// GFX12XX: v_and_b32_e32 v0, 4.0, v1 ; encoding: [0xf6,0x02,0x00,0x36]
// GFX11: v_and_b32_e32 v0, 4.0, v1 ; encoding: [0xf6,0x02,0x00,0x36]
+// GFX12XX: v_and_b32_e32 v0, 4.0, v1 ; encoding: [0xf6,0x02,0x00,0x36]
+// GFX89: v_and_b32_e32 v0, 4.0, v1 ; encoding: [0xf6,0x02,0x00,0x26]
+// SICI: v_and_b32_e32 v0, 4.0, v1 ; encoding: [0xf6,0x02,0x00,0x36]
v_and_b32_e64 v0, 4.0, v1
-// SICI: v_and_b32_e64 v0, 4.0, v1 ; encoding: [0x00,0x00,0x36,0xd2,0xf6,0x02,0x02,0x00]
-// GFX89: v_and_b32_e64 v0, 4.0, v1 ; encoding: [0x00,0x00,0x13,0xd1,0xf6,0x02,0x02,0x00]
-// GFX12XX: v_and_b32_e64 v0, 4.0, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xf6,0x02,0x02,0x00]
// GFX11: v_and_b32_e64 v0, 4.0, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xf6,0x02,0x02,0x00]
+// GFX12XX: v_and_b32_e64 v0, 4.0, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xf6,0x02,0x02,0x00]
+// GFX89: v_and_b32_e64 v0, 4.0, v1 ; encoding: [0x00,0x00,0x13,0xd1,0xf6,0x02,0x02,0x00]
+// SICI: v_and_b32_e64 v0, 4.0, v1 ; encoding: [0x00,0x00,0x36,0xd2,0xf6,0x02,0x02,0x00]
s_mov_b64_e32 s[0:1], 0.0
// GFX8PLUS: s_mov_b64 s[0:1], 0 ; encoding: [0x80,0x01,0x80,0xbe]
// SICI: s_mov_b64 s[0:1], 0 ; encoding: [0x80,0x04,0x80,0xbe]
v_and_b32_e32 v0, 0.0, v1
-// SICI: v_and_b32_e32 v0, 0, v1 ; encoding: [0x80,0x02,0x00,0x36]
-// GFX89: v_and_b32_e32 v0, 0, v1 ; encoding: [0x80,0x02,0x00,0x26]
-// GFX12XX: v_and_b32_e32 v0, 0, v1 ; encoding: [0x80,0x02,0x00,0x36]
// GFX11: v_and_b32_e32 v0, 0, v1 ; encoding: [0x80,0x02,0x00,0x36]
+// GFX12XX: v_and_b32_e32 v0, 0, v1 ; encoding: [0x80,0x02,0x00,0x36]
+// GFX89: v_and_b32_e32 v0, 0, v1 ; encoding: [0x80,0x02,0x00,0x26]
+// SICI: v_and_b32_e32 v0, 0, v1 ; encoding: [0x80,0x02,0x00,0x36]
v_and_b32_e64 v0, 0.0, v1
-// SICI: v_and_b32_e64 v0, 0, v1 ; encoding: [0x00,0x00,0x36,0xd2,0x80,0x02,0x02,0x00]
-// GFX89: v_and_b32_e64 v0, 0, v1 ; encoding: [0x00,0x00,0x13,0xd1,0x80,0x02,0x02,0x00]
-// GFX12XX: v_and_b32_e64 v0, 0, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0x80,0x02,0x02,0x00]
// GFX11: v_and_b32_e64 v0, 0, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0x80,0x02,0x02,0x00]
+// GFX12XX: v_and_b32_e64 v0, 0, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0x80,0x02,0x02,0x00]
+// GFX89: v_and_b32_e64 v0, 0, v1 ; encoding: [0x00,0x00,0x13,0xd1,0x80,0x02,0x02,0x00]
+// SICI: v_and_b32_e64 v0, 0, v1 ; encoding: [0x00,0x00,0x36,0xd2,0x80,0x02,0x02,0x00]
s_mov_b64_e32 s[0:1], 1.5
// NOGCN: :[[@LINE-1]]:23: error: invalid operand for instruction
v_and_b32_e32 v0, 1.5, v1
-// SICI: v_and_b32_e32 v0, 0x3fc00000, v1 ; encoding: [0xff,0x02,0x00,0x36,0x00,0x00,0xc0,0x3f]
-// GFX89: v_and_b32_e32 v0, 0x3fc00000, v1 ; encoding: [0xff,0x02,0x00,0x26,0x00,0x00,0xc0,0x3f]
-// GFX12XX: v_and_b32_e32 v0, 0x3fc00000, v1 ; encoding: [0xff,0x02,0x00,0x36,0x00,0x00,0xc0,0x3f]
// GFX11: v_and_b32_e32 v0, 0x3fc00000, v1 ; encoding: [0xff,0x02,0x00,0x36,0x00,0x00,0xc0,0x3f]
+// GFX12XX: v_and_b32_e32 v0, 0x3fc00000, v1 ; encoding: [0xff,0x02,0x00,0x36,0x00,0x00,0xc0,0x3f]
+// GFX89: v_and_b32_e32 v0, 0x3fc00000, v1 ; encoding: [0xff,0x02,0x00,0x26,0x00,0x00,0xc0,0x3f]
+// SICI: v_and_b32_e32 v0, 0x3fc00000, v1 ; encoding: [0xff,0x02,0x00,0x36,0x00,0x00,0xc0,0x3f]
s_mov_b64_e32 s[0:1], -3.1415
// NOGCN: :[[@LINE-1]]:23: error: invalid operand for instruction
v_and_b32_e32 v0, -3.1415, v1
-// SICI:
v_and_b32_e32 v0, 0xc0490e56, v1 ; encoding: [0xff,0x02,0x00,0x36,0x56,0x0e,0x49,0xc0] -// GFX89: v_and_b32_e32 v0, 0xc0490e56, v1 ; encoding: [0xff,0x02,0x00,0x26,0x56,0x0e,0x49,0xc0] -// GFX12XX: v_and_b32_e32 v0, 0xc0490e56, v1 ; encoding: [0xff,0x02,0x00,0x36,0x56,0x0e,0x49,0xc0] // GFX11: v_and_b32_e32 v0, 0xc0490e56, v1 ; encoding: [0xff,0x02,0x00,0x36,0x56,0x0e,0x49,0xc0] +// GFX12XX: v_and_b32_e32 v0, 0xc0490e56, v1 ; encoding: [0xff,0x02,0x00,0x36,0x56,0x0e,0x49,0xc0] +// GFX89: v_and_b32_e32 v0, 0xc0490e56, v1 ; encoding: [0xff,0x02,0x00,0x26,0x56,0x0e,0x49,0xc0] +// SICI: v_and_b32_e32 v0, 0xc0490e56, v1 ; encoding: [0xff,0x02,0x00,0x36,0x56,0x0e,0x49,0xc0] s_mov_b64_e32 s[0:1], 100000000000000000000000.0 // NOGCN: :[[@LINE-1]]:23: error: invalid operand for instruction v_and_b32_e32 v0, 100000000000000000000000.0, v1 -// SICI: v_and_b32_e32 v0, 0x65a96816, v1 ; encoding: [0xff,0x02,0x00,0x36,0x16,0x68,0xa9,0x65] -// GFX89: v_and_b32_e32 v0, 0x65a96816, v1 ; encoding: [0xff,0x02,0x00,0x26,0x16,0x68,0xa9,0x65] -// GFX12XX: v_and_b32_e32 v0, 0x65a96816, v1 ; encoding: [0xff,0x02,0x00,0x36,0x16,0x68,0xa9,0x65] // GFX11: v_and_b32_e32 v0, 0x65a96816, v1 ; encoding: [0xff,0x02,0x00,0x36,0x16,0x68,0xa9,0x65] +// GFX12XX: v_and_b32_e32 v0, 0x65a96816, v1 ; encoding: [0xff,0x02,0x00,0x36,0x16,0x68,0xa9,0x65] +// GFX89: v_and_b32_e32 v0, 0x65a96816, v1 ; encoding: [0xff,0x02,0x00,0x26,0x16,0x68,0xa9,0x65] +// SICI: v_and_b32_e32 v0, 0x65a96816, v1 ; encoding: [0xff,0x02,0x00,0x36,0x16,0x68,0xa9,0x65] s_mov_b64_e32 s[0:1], 10000000.0 // NOGCN: :[[@LINE-1]]:23: error: invalid operand for instruction v_and_b32_e32 v0, 10000000.0, v1 -// SICI: v_and_b32_e32 v0, 0x4b189680, v1 ; encoding: [0xff,0x02,0x00,0x36,0x80,0x96,0x18,0x4b] -// GFX89: v_and_b32_e32 v0, 0x4b189680, v1 ; encoding: [0xff,0x02,0x00,0x26,0x80,0x96,0x18,0x4b] -// GFX12XX: v_and_b32_e32 v0, 0x4b189680, v1 ; encoding: [0xff,0x02,0x00,0x36,0x80,0x96,0x18,0x4b] // GFX11: v_and_b32_e32 v0, 0x4b189680, v1 ; encoding: [0xff,0x02,0x00,0x36,0x80,0x96,0x18,0x4b] +// GFX12XX: v_and_b32_e32 v0, 0x4b189680, v1 ; encoding: [0xff,0x02,0x00,0x36,0x80,0x96,0x18,0x4b] +// GFX89: v_and_b32_e32 v0, 0x4b189680, v1 ; encoding: [0xff,0x02,0x00,0x26,0x80,0x96,0x18,0x4b] +// SICI: v_and_b32_e32 v0, 0x4b189680, v1 ; encoding: [0xff,0x02,0x00,0x36,0x80,0x96,0x18,0x4b] s_mov_b64_e32 s[0:1], 3.402823e+38 // NOGCN: :[[@LINE-1]]:23: error: invalid operand for instruction v_and_b32_e32 v0, 3.402823e+38, v1 -// SICI: v_and_b32_e32 v0, 0x7f7ffffd, v1 ; encoding: [0xff,0x02,0x00,0x36,0xfd,0xff,0x7f,0x7f] -// GFX89: v_and_b32_e32 v0, 0x7f7ffffd, v1 ; encoding: [0xff,0x02,0x00,0x26,0xfd,0xff,0x7f,0x7f] -// GFX12XX: v_and_b32_e32 v0, 0x7f7ffffd, v1 ; encoding: [0xff,0x02,0x00,0x36,0xfd,0xff,0x7f,0x7f] // GFX11: v_and_b32_e32 v0, 0x7f7ffffd, v1 ; encoding: [0xff,0x02,0x00,0x36,0xfd,0xff,0x7f,0x7f] +// GFX12XX: v_and_b32_e32 v0, 0x7f7ffffd, v1 ; encoding: [0xff,0x02,0x00,0x36,0xfd,0xff,0x7f,0x7f] +// GFX89: v_and_b32_e32 v0, 0x7f7ffffd, v1 ; encoding: [0xff,0x02,0x00,0x26,0xfd,0xff,0x7f,0x7f] +// SICI: v_and_b32_e32 v0, 0x7f7ffffd, v1 ; encoding: [0xff,0x02,0x00,0x36,0xfd,0xff,0x7f,0x7f] s_mov_b64_e32 s[0:1], 2.3509886e-38 // NOGCN: :[[@LINE-1]]:23: error: invalid operand for instruction v_and_b32_e32 v0, 2.3509886e-38, v1 -// SICI: v_and_b32_e32 v0, 0xffffff, v1 ; encoding: [0xff,0x02,0x00,0x36,0xff,0xff,0xff,0x00] -// GFX89: v_and_b32_e32 v0, 0xffffff, v1 ; encoding: [0xff,0x02,0x00,0x26,0xff,0xff,0xff,0x00] -// GFX12XX: v_and_b32_e32 v0, 0xffffff, v1 ; encoding: 
[0xff,0x02,0x00,0x36,0xff,0xff,0xff,0x00] // GFX11: v_and_b32_e32 v0, 0xffffff, v1 ; encoding: [0xff,0x02,0x00,0x36,0xff,0xff,0xff,0x00] +// GFX12XX: v_and_b32_e32 v0, 0xffffff, v1 ; encoding: [0xff,0x02,0x00,0x36,0xff,0xff,0xff,0x00] +// GFX89: v_and_b32_e32 v0, 0xffffff, v1 ; encoding: [0xff,0x02,0x00,0x26,0xff,0xff,0xff,0x00] +// SICI: v_and_b32_e32 v0, 0xffffff, v1 ; encoding: [0xff,0x02,0x00,0x36,0xff,0xff,0xff,0x00] s_mov_b64_e32 s[0:1], 2.3509886e-70 // NOGCN: :[[@LINE-1]]:23: error: invalid operand for instruction @@ -429,322 +429,322 @@ v_and_b32_e32 v0, 2.3509886e-70, v1 // NOGCN: :[[@LINE-1]]:19: error: invalid operand for instruction v_not_b16 v5.l, 1.0 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX11: v_not_b16_e32 v5.l, 1.0 ; encoding: [0xf2,0xd2,0x0a,0x7e] // GFX1250: v_not_b16_e32 v5.l, 1.0 ; encoding: [0xf2,0xd2,0x0a,0x7e] -// NOGFX12: :[[@LINE-5]]:1: error: operands are not valid for this GPU or mode +// NOGFX12: :[[@LINE-3]]:1: error: operands are not valid for this GPU or mode +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_not_b16 v5.l, lit(1.0) -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX11: v_not_b16_e32 v5.l, lit(0x3f800000) ; encoding: [0xff,0xd2,0x0a,0x7e,0x00,0x00,0x80,0x3f] // GFX1250: v_not_b16_e32 v5.l, lit(0x3f800000) ; encoding: [0xff,0xd2,0x0a,0x7e,0x00,0x00,0x80,0x3f] -// NOGFX12: :[[@LINE-5]]:1: error: operands are not valid for this GPU or mode +// NOGFX12: :[[@LINE-3]]:1: error: operands are not valid for this GPU or mode +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_and_b32_e32 v0, 1.0, v1 -// SICI: v_and_b32_e32 v0, 1.0, v1 ; encoding: [0xf2,0x02,0x00,0x36] -// GFX89: v_and_b32_e32 v0, 1.0, v1 ; encoding: [0xf2,0x02,0x00,0x26] -// GFX12XX: v_and_b32_e32 v0, 1.0, v1 ; encoding: [0xf2,0x02,0x00,0x36] // GFX11: v_and_b32_e32 v0, 1.0, v1 ; encoding: [0xf2,0x02,0x00,0x36] +// GFX12XX: v_and_b32_e32 v0, 1.0, v1 ; encoding: [0xf2,0x02,0x00,0x36] +// GFX89: v_and_b32_e32 v0, 1.0, v1 ; encoding: [0xf2,0x02,0x00,0x26] +// SICI: v_and_b32_e32 v0, 1.0, v1 ; encoding: [0xf2,0x02,0x00,0x36] v_and_b32_e32 v0, lit(1.0), v1 -// SICI: v_and_b32_e32 v0, lit(0x3f800000), v1 ; encoding: [0xff,0x02,0x00,0x36,0x00,0x00,0x80,0x3f] -// GFX89: v_and_b32_e32 v0, lit(0x3f800000), v1 ; encoding: [0xff,0x02,0x00,0x26,0x00,0x00,0x80,0x3f] -// GFX12XX: v_and_b32_e32 v0, lit(0x3f800000), v1 ; encoding: [0xff,0x02,0x00,0x36,0x00,0x00,0x80,0x3f] // GFX11: v_and_b32_e32 v0, lit(0x3f800000), v1 ; encoding: [0xff,0x02,0x00,0x36,0x00,0x00,0x80,0x3f] +// GFX12XX: v_and_b32_e32 v0, lit(0x3f800000), v1 ; encoding: [0xff,0x02,0x00,0x36,0x00,0x00,0x80,0x3f] +// GFX89: v_and_b32_e32 v0, lit(0x3f800000), v1 ; encoding: [0xff,0x02,0x00,0x26,0x00,0x00,0x80,0x3f] +// SICI: v_and_b32_e32 v0, lit(0x3f800000), v1 ; encoding: [0xff,0x02,0x00,0x36,0x00,0x00,0x80,0x3f] v_pk_add_u16 v5, exec_lo, 1.0 +// GFX11: v_pk_add_u16 v5, exec_lo, 1.0 ; encoding: [0x05,0x40,0x0a,0xcc,0x7e,0xe4,0x01,0x18] // GFX12XX: v_pk_add_u16 v5, exec_lo, 1.0 ; encoding: [0x05,0x40,0x0a,0xcc,0x7e,0xe4,0x01,0x18] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX9: v_pk_add_u16 v5, exec_lo, 
1.0 ; encoding: [0x05,0x40,0x8a,0xd3,0x7e,0xe4,0x01,0x18] -// GFX11: v_pk_add_u16 v5, exec_lo, 1.0 ; encoding: [0x05,0x40,0x0a,0xcc,0x7e,0xe4,0x01,0x18] +// NOSICI: :[[@LINE-4]]:1: error: instruction not supported on this GPU // NOVI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_pk_add_u16 v5, exec_lo, lit(1.0) -// GFX12XX: v_pk_add_u16 v5, exec_lo, lit(0x3f800000) ; encoding: [0x05,0x40,0x0a,0xcc,0x7e,0xfe,0x01,0x18,0x00,0x00,0x80,0x3f] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX11: v_pk_add_u16 v5, exec_lo, lit(0x3f800000) ; encoding: [0x05,0x40,0x0a,0xcc,0x7e,0xfe,0x01,0x18,0x00,0x00,0x80,0x3f] -// NOVI: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX9: :[[@LINE-5]]:31: error: invalid operand (violates constant bus restrictions) +// GFX12XX: v_pk_add_u16 v5, exec_lo, lit(0x3f800000) ; encoding: [0x05,0x40,0x0a,0xcc,0x7e,0xfe,0x01,0x18,0x00,0x00,0x80,0x3f] +// NOGFX9: :[[@LINE-3]]:31: error: invalid operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_perm_pk16_b6_u4 v[2:4], v4, v[4:5], 1.0 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX1250: v_perm_pk16_b6_u4 v[2:4], v4, v[4:5], 1.0 ; encoding: [0x02,0x00,0x42,0xd6,0x04,0x09,0xca,0x03] -// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_perm_pk16_b6_u4 v[2:4], v4, v[4:5], lit(1.0) -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX1250: v_perm_pk16_b6_u4 v[2:4], v4, v[4:5], lit(0x3f800000) ; encoding: [0x02,0x00,0x42,0xd6,0x04,0x09,0xfe,0x03,0x00,0x00,0x80,0x3f] -// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU //---------------------------------------------------------------------------// // int literal, expected fp operand //---------------------------------------------------------------------------// v_trunc_f32_e32 v0, 0 -// SICI: v_trunc_f32_e32 v0, 0 ; encoding: [0x80,0x42,0x00,0x7e] -// GFX89: v_trunc_f32_e32 v0, 0 ; encoding: [0x80,0x38,0x00,0x7e] -// GFX12XX: v_trunc_f32_e32 v0, 0 ; encoding: [0x80,0x42,0x00,0x7e] // GFX11: v_trunc_f32_e32 v0, 0 ; encoding: [0x80,0x42,0x00,0x7e] +// GFX12XX: v_trunc_f32_e32 v0, 0 ; encoding: [0x80,0x42,0x00,0x7e] +// GFX89: v_trunc_f32_e32 v0, 0 ; encoding: [0x80,0x38,0x00,0x7e] +// SICI: v_trunc_f32_e32 v0, 0 ; encoding: [0x80,0x42,0x00,0x7e] v_fract_f64_e32 v[0:1], 1 -// SICI: v_fract_f64_e32 v[0:1], 1 ; encoding: [0x81,0x7c,0x00,0x7e] -// GFX89: v_fract_f64_e32 v[0:1], 1 ; encoding: [0x81,0x64,0x00,0x7e] 
-// GFX12XX: v_fract_f64_e32 v[0:1], 1 ; encoding: [0x81,0x7c,0x00,0x7e] // GFX11: v_fract_f64_e32 v[0:1], 1 ; encoding: [0x81,0x7c,0x00,0x7e] +// GFX12XX: v_fract_f64_e32 v[0:1], 1 ; encoding: [0x81,0x7c,0x00,0x7e] +// GFX89: v_fract_f64_e32 v[0:1], 1 ; encoding: [0x81,0x64,0x00,0x7e] +// SICI: v_fract_f64_e32 v[0:1], 1 ; encoding: [0x81,0x7c,0x00,0x7e] v_fract_f64_e32 v[0:1], lit(1) -// SICI: v_fract_f64_e32 v[0:1], lit(0x1) ; encoding: [0xff,0x7c,0x00,0x7e,0x01,0x00,0x00,0x00] -// GFX89: v_fract_f64_e32 v[0:1], lit(0x1) ; encoding: [0xff,0x64,0x00,0x7e,0x01,0x00,0x00,0x00] // GFX11: v_fract_f64_e32 v[0:1], lit(0x1) ; encoding: [0xff,0x7c,0x00,0x7e,0x01,0x00,0x00,0x00] // GFX12: v_fract_f64_e32 v[0:1], lit(0x1) ; encoding: [0xff,0x7c,0x00,0x7e,0x01,0x00,0x00,0x00] // GFX1250: v_fract_f64_e32 v[0:1], lit(0x1) ; encoding: [0xfe,0x7c,0x00,0x7e,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00] +// GFX89: v_fract_f64_e32 v[0:1], lit(0x1) ; encoding: [0xff,0x64,0x00,0x7e,0x01,0x00,0x00,0x00] +// SICI: v_fract_f64_e32 v[0:1], lit(0x1) ; encoding: [0xff,0x7c,0x00,0x7e,0x01,0x00,0x00,0x00] v_trunc_f32_e64 v0, 0 -// SICI: v_trunc_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x42,0xd3,0x80,0x00,0x00,0x00] -// GFX89: v_trunc_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x5c,0xd1,0x80,0x00,0x00,0x00] -// GFX12XX: v_trunc_f32_e64 v0, 0 ; encoding: [0x00,0x00,0xa1,0xd5,0x80,0x00,0x00,0x00] // GFX11: v_trunc_f32_e64 v0, 0 ; encoding: [0x00,0x00,0xa1,0xd5,0x80,0x00,0x00,0x00] +// GFX12XX: v_trunc_f32_e64 v0, 0 ; encoding: [0x00,0x00,0xa1,0xd5,0x80,0x00,0x00,0x00] +// GFX89: v_trunc_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x5c,0xd1,0x80,0x00,0x00,0x00] +// SICI: v_trunc_f32_e64 v0, 0 ; encoding: [0x00,0x00,0x42,0xd3,0x80,0x00,0x00,0x00] v_fract_f64_e64 v[0:1], 0 -// SICI: v_fract_f64_e64 v[0:1], 0 ; encoding: [0x00,0x00,0x7c,0xd3,0x80,0x00,0x00,0x00] -// GFX89: v_fract_f64_e64 v[0:1], 0 ; encoding: [0x00,0x00,0x72,0xd1,0x80,0x00,0x00,0x00] -// GFX12XX: v_fract_f64_e64 v[0:1], 0 ; encoding: [0x00,0x00,0xbe,0xd5,0x80,0x00,0x00,0x00] // GFX11: v_fract_f64_e64 v[0:1], 0 ; encoding: [0x00,0x00,0xbe,0xd5,0x80,0x00,0x00,0x00] +// GFX12XX: v_fract_f64_e64 v[0:1], 0 ; encoding: [0x00,0x00,0xbe,0xd5,0x80,0x00,0x00,0x00] +// GFX89: v_fract_f64_e64 v[0:1], 0 ; encoding: [0x00,0x00,0x72,0xd1,0x80,0x00,0x00,0x00] +// SICI: v_fract_f64_e64 v[0:1], 0 ; encoding: [0x00,0x00,0x7c,0xd3,0x80,0x00,0x00,0x00] v_trunc_f32_e32 v0, -13 -// SICI: v_trunc_f32_e32 v0, -13 ; encoding: [0xcd,0x42,0x00,0x7e] -// GFX89: v_trunc_f32_e32 v0, -13 ; encoding: [0xcd,0x38,0x00,0x7e] -// GFX12XX: v_trunc_f32_e32 v0, -13 ; encoding: [0xcd,0x42,0x00,0x7e] // GFX11: v_trunc_f32_e32 v0, -13 ; encoding: [0xcd,0x42,0x00,0x7e] +// GFX12XX: v_trunc_f32_e32 v0, -13 ; encoding: [0xcd,0x42,0x00,0x7e] +// GFX89: v_trunc_f32_e32 v0, -13 ; encoding: [0xcd,0x38,0x00,0x7e] +// SICI: v_trunc_f32_e32 v0, -13 ; encoding: [0xcd,0x42,0x00,0x7e] v_fract_f64_e32 v[0:1], -13 -// SICI: v_fract_f64_e32 v[0:1], -13 ; encoding: [0xcd,0x7c,0x00,0x7e] -// GFX89: v_fract_f64_e32 v[0:1], -13 ; encoding: [0xcd,0x64,0x00,0x7e] -// GFX12XX: v_fract_f64_e32 v[0:1], -13 ; encoding: [0xcd,0x7c,0x00,0x7e] // GFX11: v_fract_f64_e32 v[0:1], -13 ; encoding: [0xcd,0x7c,0x00,0x7e] +// GFX12XX: v_fract_f64_e32 v[0:1], -13 ; encoding: [0xcd,0x7c,0x00,0x7e] +// GFX89: v_fract_f64_e32 v[0:1], -13 ; encoding: [0xcd,0x64,0x00,0x7e] +// SICI: v_fract_f64_e32 v[0:1], -13 ; encoding: [0xcd,0x7c,0x00,0x7e] v_trunc_f32_e64 v0, -13 -// SICI: v_trunc_f32_e64 v0, -13 ; encoding: [0x00,0x00,0x42,0xd3,0xcd,0x00,0x00,0x00] -// 
GFX89: v_trunc_f32_e64 v0, -13 ; encoding: [0x00,0x00,0x5c,0xd1,0xcd,0x00,0x00,0x00] -// GFX12XX: v_trunc_f32_e64 v0, -13 ; encoding: [0x00,0x00,0xa1,0xd5,0xcd,0x00,0x00,0x00] // GFX11: v_trunc_f32_e64 v0, -13 ; encoding: [0x00,0x00,0xa1,0xd5,0xcd,0x00,0x00,0x00] +// GFX12XX: v_trunc_f32_e64 v0, -13 ; encoding: [0x00,0x00,0xa1,0xd5,0xcd,0x00,0x00,0x00] +// GFX89: v_trunc_f32_e64 v0, -13 ; encoding: [0x00,0x00,0x5c,0xd1,0xcd,0x00,0x00,0x00] +// SICI: v_trunc_f32_e64 v0, -13 ; encoding: [0x00,0x00,0x42,0xd3,0xcd,0x00,0x00,0x00] v_fract_f64_e64 v[0:1], -13 -// SICI: v_fract_f64_e64 v[0:1], -13 ; encoding: [0x00,0x00,0x7c,0xd3,0xcd,0x00,0x00,0x00] -// GFX89: v_fract_f64_e64 v[0:1], -13 ; encoding: [0x00,0x00,0x72,0xd1,0xcd,0x00,0x00,0x00] -// GFX12XX: v_fract_f64_e64 v[0:1], -13 ; encoding: [0x00,0x00,0xbe,0xd5,0xcd,0x00,0x00,0x00] // GFX11: v_fract_f64_e64 v[0:1], -13 ; encoding: [0x00,0x00,0xbe,0xd5,0xcd,0x00,0x00,0x00] +// GFX12XX: v_fract_f64_e64 v[0:1], -13 ; encoding: [0x00,0x00,0xbe,0xd5,0xcd,0x00,0x00,0x00] +// GFX89: v_fract_f64_e64 v[0:1], -13 ; encoding: [0x00,0x00,0x72,0xd1,0xcd,0x00,0x00,0x00] +// SICI: v_fract_f64_e64 v[0:1], -13 ; encoding: [0x00,0x00,0x7c,0xd3,0xcd,0x00,0x00,0x00] v_trunc_f32_e32 v0, 35 -// SICI: v_trunc_f32_e32 v0, 35 ; encoding: [0xa3,0x42,0x00,0x7e] -// GFX89: v_trunc_f32_e32 v0, 35 ; encoding: [0xa3,0x38,0x00,0x7e] -// GFX12XX: v_trunc_f32_e32 v0, 35 ; encoding: [0xa3,0x42,0x00,0x7e] // GFX11: v_trunc_f32_e32 v0, 35 ; encoding: [0xa3,0x42,0x00,0x7e] +// GFX12XX: v_trunc_f32_e32 v0, 35 ; encoding: [0xa3,0x42,0x00,0x7e] +// GFX89: v_trunc_f32_e32 v0, 35 ; encoding: [0xa3,0x38,0x00,0x7e] +// SICI: v_trunc_f32_e32 v0, 35 ; encoding: [0xa3,0x42,0x00,0x7e] v_fract_f64_e32 v[0:1], 35 -// SICI: v_fract_f64_e32 v[0:1], 35 ; encoding: [0xa3,0x7c,0x00,0x7e] -// GFX89: v_fract_f64_e32 v[0:1], 35 ; encoding: [0xa3,0x64,0x00,0x7e] -// GFX12XX: v_fract_f64_e32 v[0:1], 35 ; encoding: [0xa3,0x7c,0x00,0x7e] // GFX11: v_fract_f64_e32 v[0:1], 35 ; encoding: [0xa3,0x7c,0x00,0x7e] +// GFX12XX: v_fract_f64_e32 v[0:1], 35 ; encoding: [0xa3,0x7c,0x00,0x7e] +// GFX89: v_fract_f64_e32 v[0:1], 35 ; encoding: [0xa3,0x64,0x00,0x7e] +// SICI: v_fract_f64_e32 v[0:1], 35 ; encoding: [0xa3,0x7c,0x00,0x7e] v_trunc_f32_e64 v0, 35 -// SICI: v_trunc_f32_e64 v0, 35 ; encoding: [0x00,0x00,0x42,0xd3,0xa3,0x00,0x00,0x00] -// GFX89: v_trunc_f32_e64 v0, 35 ; encoding: [0x00,0x00,0x5c,0xd1,0xa3,0x00,0x00,0x00] -// GFX12XX: v_trunc_f32_e64 v0, 35 ; encoding: [0x00,0x00,0xa1,0xd5,0xa3,0x00,0x00,0x00] // GFX11: v_trunc_f32_e64 v0, 35 ; encoding: [0x00,0x00,0xa1,0xd5,0xa3,0x00,0x00,0x00] +// GFX12XX: v_trunc_f32_e64 v0, 35 ; encoding: [0x00,0x00,0xa1,0xd5,0xa3,0x00,0x00,0x00] +// GFX89: v_trunc_f32_e64 v0, 35 ; encoding: [0x00,0x00,0x5c,0xd1,0xa3,0x00,0x00,0x00] +// SICI: v_trunc_f32_e64 v0, 35 ; encoding: [0x00,0x00,0x42,0xd3,0xa3,0x00,0x00,0x00] v_fract_f64_e64 v[0:1], 35 -// SICI: v_fract_f64_e64 v[0:1], 35 ; encoding: [0x00,0x00,0x7c,0xd3,0xa3,0x00,0x00,0x00] -// GFX89: v_fract_f64_e64 v[0:1], 35 ; encoding: [0x00,0x00,0x72,0xd1,0xa3,0x00,0x00,0x00] -// GFX12XX: v_fract_f64_e64 v[0:1], 35 ; encoding: [0x00,0x00,0xbe,0xd5,0xa3,0x00,0x00,0x00] // GFX11: v_fract_f64_e64 v[0:1], 35 ; encoding: [0x00,0x00,0xbe,0xd5,0xa3,0x00,0x00,0x00] +// GFX12XX: v_fract_f64_e64 v[0:1], 35 ; encoding: [0x00,0x00,0xbe,0xd5,0xa3,0x00,0x00,0x00] +// GFX89: v_fract_f64_e64 v[0:1], 35 ; encoding: [0x00,0x00,0x72,0xd1,0xa3,0x00,0x00,0x00] +// SICI: v_fract_f64_e64 v[0:1], 35 ; encoding: 
[0x00,0x00,0x7c,0xd3,0xa3,0x00,0x00,0x00] v_trunc_f32_e32 v0, 1234 -// SICI: v_trunc_f32_e32 v0, 0x4d2 ; encoding: [0xff,0x42,0x00,0x7e,0xd2,0x04,0x00,0x00] -// GFX89: v_trunc_f32_e32 v0, 0x4d2 ; encoding: [0xff,0x38,0x00,0x7e,0xd2,0x04,0x00,0x00] -// GFX12XX: v_trunc_f32_e32 v0, 0x4d2 ; encoding: [0xff,0x42,0x00,0x7e,0xd2,0x04,0x00,0x00] // GFX11: v_trunc_f32_e32 v0, 0x4d2 ; encoding: [0xff,0x42,0x00,0x7e,0xd2,0x04,0x00,0x00] +// GFX12XX: v_trunc_f32_e32 v0, 0x4d2 ; encoding: [0xff,0x42,0x00,0x7e,0xd2,0x04,0x00,0x00] +// GFX89: v_trunc_f32_e32 v0, 0x4d2 ; encoding: [0xff,0x38,0x00,0x7e,0xd2,0x04,0x00,0x00] +// SICI: v_trunc_f32_e32 v0, 0x4d2 ; encoding: [0xff,0x42,0x00,0x7e,0xd2,0x04,0x00,0x00] v_fract_f64_e32 v[0:1], 1234 -// SICI: v_fract_f64_e32 v[0:1], 0x4d2 ; encoding: [0xff,0x7c,0x00,0x7e,0xd2,0x04,0x00,0x00] -// GFX89: v_fract_f64_e32 v[0:1], 0x4d2 ; encoding: [0xff,0x64,0x00,0x7e,0xd2,0x04,0x00,0x00] -// GFX12XX: v_fract_f64_e32 v[0:1], 0x4d2 ; encoding: [0xff,0x7c,0x00,0x7e,0xd2,0x04,0x00,0x00] // GFX11: v_fract_f64_e32 v[0:1], 0x4d2 ; encoding: [0xff,0x7c,0x00,0x7e,0xd2,0x04,0x00,0x00] +// GFX12XX: v_fract_f64_e32 v[0:1], 0x4d2 ; encoding: [0xff,0x7c,0x00,0x7e,0xd2,0x04,0x00,0x00] +// GFX89: v_fract_f64_e32 v[0:1], 0x4d2 ; encoding: [0xff,0x64,0x00,0x7e,0xd2,0x04,0x00,0x00] +// SICI: v_fract_f64_e32 v[0:1], 0x4d2 ; encoding: [0xff,0x7c,0x00,0x7e,0xd2,0x04,0x00,0x00] v_trunc_f32_e64 v0, 1234 +// GFX11: v_trunc_f32_e64 v0, 0x4d2 ; encoding: [0x00,0x00,0xa1,0xd5,0xff,0x00,0x00,0x00,0xd2,0x04,0x00,0x00] // GFX12XX: v_trunc_f32_e64 v0, 0x4d2 ; encoding: [0x00,0x00,0xa1,0xd5,0xff,0x00,0x00,0x00,0xd2,0x04,0x00,0x00] -// NOSICI: :[[@LINE-2]]:21: error: literal operands are not supported // NOGFX89: :[[@LINE-3]]:21: error: literal operands are not supported -// GFX11: v_trunc_f32_e64 v0, 0x4d2 ; encoding: [0x00,0x00,0xa1,0xd5,0xff,0x00,0x00,0x00,0xd2,0x04,0x00,0x00] +// NOSICI: :[[@LINE-4]]:21: error: literal operands are not supported // NOSICIVI: :[[@LINE-1]]:21: error: literal operands are not supported v_fract_f64_e64 v[0:1], 1234 +// GFX11: v_fract_f64_e64 v[0:1], 0x4d2 ; encoding: [0x00,0x00,0xbe,0xd5,0xff,0x00,0x00,0x00,0xd2,0x04,0x00,0x00] // GFX12XX: v_fract_f64_e64 v[0:1], 0x4d2 ; encoding: [0x00,0x00,0xbe,0xd5,0xff,0x00,0x00,0x00,0xd2,0x04,0x00,0x00] -// NOSICI: :[[@LINE-2]]:25: error: literal operands are not supported // NOGFX89: :[[@LINE-3]]:25: error: literal operands are not supported -// GFX11: v_fract_f64_e64 v[0:1], 0x4d2 ; encoding: [0x00,0x00,0xbe,0xd5,0xff,0x00,0x00,0x00,0xd2,0x04,0x00,0x00] +// NOSICI: :[[@LINE-4]]:25: error: literal operands are not supported // NOSICIVI: :[[@LINE-1]]:25: error: literal operands are not supported v_trunc_f32_e32 v0, -54321 -// SICI: v_trunc_f32_e32 v0, 0xffff2bcf ; encoding: [0xff,0x42,0x00,0x7e,0xcf,0x2b,0xff,0xff] -// GFX89: v_trunc_f32_e32 v0, 0xffff2bcf ; encoding: [0xff,0x38,0x00,0x7e,0xcf,0x2b,0xff,0xff] -// GFX12XX: v_trunc_f32_e32 v0, 0xffff2bcf ; encoding: [0xff,0x42,0x00,0x7e,0xcf,0x2b,0xff,0xff] // GFX11: v_trunc_f32_e32 v0, 0xffff2bcf ; encoding: [0xff,0x42,0x00,0x7e,0xcf,0x2b,0xff,0xff] +// GFX12XX: v_trunc_f32_e32 v0, 0xffff2bcf ; encoding: [0xff,0x42,0x00,0x7e,0xcf,0x2b,0xff,0xff] +// GFX89: v_trunc_f32_e32 v0, 0xffff2bcf ; encoding: [0xff,0x38,0x00,0x7e,0xcf,0x2b,0xff,0xff] +// SICI: v_trunc_f32_e32 v0, 0xffff2bcf ; encoding: [0xff,0x42,0x00,0x7e,0xcf,0x2b,0xff,0xff] v_fract_f64_e32 v[0:1], -54321 -// SICI: v_fract_f64_e32 v[0:1], 0xffff2bcf ; encoding: [0xff,0x7c,0x00,0x7e,0xcf,0x2b,0xff,0xff] -// GFX89: 
v_fract_f64_e32 v[0:1], 0xffff2bcf ; encoding: [0xff,0x64,0x00,0x7e,0xcf,0x2b,0xff,0xff] -// GFX12XX: v_fract_f64_e32 v[0:1], 0xffff2bcf ; encoding: [0xff,0x7c,0x00,0x7e,0xcf,0x2b,0xff,0xff] // GFX11: v_fract_f64_e32 v[0:1], 0xffff2bcf ; encoding: [0xff,0x7c,0x00,0x7e,0xcf,0x2b,0xff,0xff] +// GFX12XX: v_fract_f64_e32 v[0:1], 0xffff2bcf ; encoding: [0xff,0x7c,0x00,0x7e,0xcf,0x2b,0xff,0xff] +// GFX89: v_fract_f64_e32 v[0:1], 0xffff2bcf ; encoding: [0xff,0x64,0x00,0x7e,0xcf,0x2b,0xff,0xff] +// SICI: v_fract_f64_e32 v[0:1], 0xffff2bcf ; encoding: [0xff,0x7c,0x00,0x7e,0xcf,0x2b,0xff,0xff] v_trunc_f32_e32 v0, 0xdeadbeef -// SICI: v_trunc_f32_e32 v0, 0xdeadbeef ; encoding: [0xff,0x42,0x00,0x7e,0xef,0xbe,0xad,0xde] -// GFX89: v_trunc_f32_e32 v0, 0xdeadbeef ; encoding: [0xff,0x38,0x00,0x7e,0xef,0xbe,0xad,0xde] -// GFX12XX: v_trunc_f32_e32 v0, 0xdeadbeef ; encoding: [0xff,0x42,0x00,0x7e,0xef,0xbe,0xad,0xde] // GFX11: v_trunc_f32_e32 v0, 0xdeadbeef ; encoding: [0xff,0x42,0x00,0x7e,0xef,0xbe,0xad,0xde] +// GFX12XX: v_trunc_f32_e32 v0, 0xdeadbeef ; encoding: [0xff,0x42,0x00,0x7e,0xef,0xbe,0xad,0xde] +// GFX89: v_trunc_f32_e32 v0, 0xdeadbeef ; encoding: [0xff,0x38,0x00,0x7e,0xef,0xbe,0xad,0xde] +// SICI: v_trunc_f32_e32 v0, 0xdeadbeef ; encoding: [0xff,0x42,0x00,0x7e,0xef,0xbe,0xad,0xde] v_fract_f64_e32 v[0:1], 0xdeadbeef -// SICI: v_fract_f64_e32 v[0:1], 0xdeadbeef ; encoding: [0xff,0x7c,0x00,0x7e,0xef,0xbe,0xad,0xde] -// GFX89: v_fract_f64_e32 v[0:1], 0xdeadbeef ; encoding: [0xff,0x64,0x00,0x7e,0xef,0xbe,0xad,0xde] -// GFX12XX: v_fract_f64_e32 v[0:1], 0xdeadbeef ; encoding: [0xff,0x7c,0x00,0x7e,0xef,0xbe,0xad,0xde] // GFX11: v_fract_f64_e32 v[0:1], 0xdeadbeef ; encoding: [0xff,0x7c,0x00,0x7e,0xef,0xbe,0xad,0xde] +// GFX12XX: v_fract_f64_e32 v[0:1], 0xdeadbeef ; encoding: [0xff,0x7c,0x00,0x7e,0xef,0xbe,0xad,0xde] +// GFX89: v_fract_f64_e32 v[0:1], 0xdeadbeef ; encoding: [0xff,0x64,0x00,0x7e,0xef,0xbe,0xad,0xde] +// SICI: v_fract_f64_e32 v[0:1], 0xdeadbeef ; encoding: [0xff,0x7c,0x00,0x7e,0xef,0xbe,0xad,0xde] v_trunc_f32_e32 v0, 0xffffffff -// SICI: v_trunc_f32_e32 v0, -1 ; encoding: [0xc1,0x42,0x00,0x7e] -// GFX89: v_trunc_f32_e32 v0, -1 ; encoding: [0xc1,0x38,0x00,0x7e] -// GFX12XX: v_trunc_f32_e32 v0, -1 ; encoding: [0xc1,0x42,0x00,0x7e] // GFX11: v_trunc_f32_e32 v0, -1 ; encoding: [0xc1,0x42,0x00,0x7e] +// GFX12XX: v_trunc_f32_e32 v0, -1 ; encoding: [0xc1,0x42,0x00,0x7e] +// GFX89: v_trunc_f32_e32 v0, -1 ; encoding: [0xc1,0x38,0x00,0x7e] +// SICI: v_trunc_f32_e32 v0, -1 ; encoding: [0xc1,0x42,0x00,0x7e] v_fract_f64_e32 v[0:1], 0xffffffff -// SICI: v_fract_f64_e32 v[0:1], 0xffffffff ; encoding: [0xff,0x7c,0x00,0x7e,0xff,0xff,0xff,0xff] -// GFX89: v_fract_f64_e32 v[0:1], 0xffffffff ; encoding: [0xff,0x64,0x00,0x7e,0xff,0xff,0xff,0xff] -// GFX12XX: v_fract_f64_e32 v[0:1], 0xffffffff ; encoding: [0xff,0x7c,0x00,0x7e,0xff,0xff,0xff,0xff] // GFX11: v_fract_f64_e32 v[0:1], 0xffffffff ; encoding: [0xff,0x7c,0x00,0x7e,0xff,0xff,0xff,0xff] +// GFX12XX: v_fract_f64_e32 v[0:1], 0xffffffff ; encoding: [0xff,0x7c,0x00,0x7e,0xff,0xff,0xff,0xff] +// GFX89: v_fract_f64_e32 v[0:1], 0xffffffff ; encoding: [0xff,0x64,0x00,0x7e,0xff,0xff,0xff,0xff] +// SICI: v_fract_f64_e32 v[0:1], 0xffffffff ; encoding: [0xff,0x7c,0x00,0x7e,0xff,0xff,0xff,0xff] v_trunc_f32_e32 v0, 0x123456789abcdef0 // NOGCN: :[[@LINE-1]]:21: error: invalid operand for instruction v_fract_f64_e32 v[0:1], 0x123456789abcdef0 -// NOSICI: :[[@LINE-1]]:25: error: invalid operand for instruction -// NOGFX89: :[[@LINE-2]]:25: error: invalid operand for 
instruction // GFX1250: v_fract_f64_e32 v[0:1], 0x123456789abcdef0 ; encoding: [0xfe,0x7c,0x00,0x7e,0xf0,0xde,0xbc,0x9a,0x78,0x56,0x34,0x12] -// NOGFX11: :[[@LINE-4]]:25: error: invalid operand for instruction -// NOGFX12: :[[@LINE-5]]:25: error: invalid operand for instruction +// NOGFX11: :[[@LINE-2]]:25: error: invalid operand for instruction +// NOGFX12: :[[@LINE-3]]:25: error: invalid operand for instruction +// NOGFX89: :[[@LINE-4]]:25: error: invalid operand for instruction +// NOSICI: :[[@LINE-5]]:25: error: invalid operand for instruction // NOSICIVI: :[[@LINE-1]]:25: error: invalid operand for instruction v_trunc_f32_e32 v0, 0xffffffffffffffff -// SICI: v_trunc_f32_e32 v0, -1 ; encoding: [0xc1,0x42,0x00,0x7e] -// GFX89: v_trunc_f32_e32 v0, -1 ; encoding: [0xc1,0x38,0x00,0x7e] -// GFX12XX: v_trunc_f32_e32 v0, -1 ; encoding: [0xc1,0x42,0x00,0x7e] // GFX11: v_trunc_f32_e32 v0, -1 ; encoding: [0xc1,0x42,0x00,0x7e] +// GFX12XX: v_trunc_f32_e32 v0, -1 ; encoding: [0xc1,0x42,0x00,0x7e] +// GFX89: v_trunc_f32_e32 v0, -1 ; encoding: [0xc1,0x38,0x00,0x7e] +// SICI: v_trunc_f32_e32 v0, -1 ; encoding: [0xc1,0x42,0x00,0x7e] v_fract_f64_e32 v[0:1], 0xffffffffffffffff -// SICI: v_fract_f64_e32 v[0:1], -1 ; encoding: [0xc1,0x7c,0x00,0x7e] -// GFX89: v_fract_f64_e32 v[0:1], -1 ; encoding: [0xc1,0x64,0x00,0x7e] -// GFX12XX: v_fract_f64_e32 v[0:1], -1 ; encoding: [0xc1,0x7c,0x00,0x7e] // GFX11: v_fract_f64_e32 v[0:1], -1 ; encoding: [0xc1,0x7c,0x00,0x7e] +// GFX12XX: v_fract_f64_e32 v[0:1], -1 ; encoding: [0xc1,0x7c,0x00,0x7e] +// GFX89: v_fract_f64_e32 v[0:1], -1 ; encoding: [0xc1,0x64,0x00,0x7e] +// SICI: v_fract_f64_e32 v[0:1], -1 ; encoding: [0xc1,0x7c,0x00,0x7e] v_wmma_i32_16x16x16_iu8 v[8:15], v[0:3], v[4:7], 1 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX11: v_wmma_i32_16x16x16_iu8 v[8:15], v[0:3], v[4:7], 1 ; encoding: [0x08,0x40,0x44,0xcc,0x00,0x09,0x06,0x1a] -// NOGFX12: :[[@LINE-4]]:1: error: operands are not valid for this GPU or mode -// NOGFX1250: :[[@LINE-5]]:1: error: operands are not valid for this GPU or mode +// NOGFX12: :[[@LINE-2]]:1: error: operands are not valid for this GPU or mode +// NOGFX1250: :[[@LINE-3]]:1: error: operands are not valid for this GPU or mode +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_wmma_i32_16x16x16_iu8 v[8:15], v[0:3], v[4:7], lit(1) -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU -// NOGFX11: :[[@LINE-3]]:54: error: invalid operand for instruction -// NOGFX12: :[[@LINE-4]]:54: error: invalid operand for instruction -// NOGFX1250: :[[@LINE-5]]:54: error: invalid operand for instruction +// NOGFX11: :[[@LINE-1]]:54: error: invalid operand for instruction +// NOGFX12: :[[@LINE-2]]:54: error: invalid operand for instruction +// NOGFX1250: :[[@LINE-3]]:54: error: invalid operand for instruction +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_cos_f16_e32 v5.l, 1 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: operands are not valid for this GPU or mode // GFX11: v_cos_f16_e32 v5.l, 1 ; encoding: [0x81,0xc2,0x0a,0x7e] // GFX1250: v_cos_f16_e32 v5.l, 1 ; encoding: [0x81,0xc2,0x0a,0x7e] -// 
NOGFX12: :[[@LINE-5]]:1: error: operands are not valid for this GPU or mode +// NOGFX12: :[[@LINE-3]]:1: error: operands are not valid for this GPU or mode +// NOGFX89: :[[@LINE-4]]:1: error: operands are not valid for this GPU or mode +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_cos_f16_e32 v5.l, lit(1) -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: operands are not valid for this GPU or mode // GFX11: v_cos_f16_e32 v5.l, lit(0x1) ; encoding: [0xff,0xc2,0x0a,0x7e,0x01,0x00,0x00,0x00] // GFX1250: v_cos_f16_e32 v5.l, lit(0x1) ; encoding: [0xff,0xc2,0x0a,0x7e,0x01,0x00,0x00,0x00] -// NOGFX12: :[[@LINE-5]]:1: error: operands are not valid for this GPU or mode +// NOGFX12: :[[@LINE-3]]:1: error: operands are not valid for this GPU or mode +// NOGFX89: :[[@LINE-4]]:1: error: operands are not valid for this GPU or mode +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_tanh_bf16 v5, 1 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX1250: v_tanh_bf16_e32 v5, 1 ; encoding: [0x81,0x94,0x0a,0x7e] -// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_tanh_bf16 v5, lit(1) -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX1250: v_tanh_bf16_e32 v5, lit(0x1) ; encoding: [0xff,0x94,0x0a,0x7e,0x01,0x00,0x00,0x00] -// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_trunc_f32_e32 v0, 1 -// SICI: v_trunc_f32_e32 v0, 1 ; encoding: [0x81,0x42,0x00,0x7e] -// GFX89: v_trunc_f32_e32 v0, 1 ; encoding: [0x81,0x38,0x00,0x7e] -// GFX12XX: v_trunc_f32_e32 v0, 1 ; encoding: [0x81,0x42,0x00,0x7e] // GFX11: v_trunc_f32_e32 v0, 1 ; encoding: [0x81,0x42,0x00,0x7e] +// GFX12XX: v_trunc_f32_e32 v0, 1 ; encoding: [0x81,0x42,0x00,0x7e] +// GFX89: v_trunc_f32_e32 v0, 1 ; encoding: [0x81,0x38,0x00,0x7e] +// SICI: v_trunc_f32_e32 v0, 1 ; encoding: [0x81,0x42,0x00,0x7e] v_trunc_f32_e32 v0, lit(1) -// SICI: v_trunc_f32_e32 v0, lit(0x1) ; encoding: [0xff,0x42,0x00,0x7e,0x01,0x00,0x00,0x00] -// GFX89: v_trunc_f32_e32 v0, lit(0x1) ; encoding: [0xff,0x38,0x00,0x7e,0x01,0x00,0x00,0x00] -// GFX12XX: v_trunc_f32_e32 v0, lit(0x1) ; encoding: [0xff,0x42,0x00,0x7e,0x01,0x00,0x00,0x00] // GFX11: v_trunc_f32_e32 v0, lit(0x1) ; encoding: [0xff,0x42,0x00,0x7e,0x01,0x00,0x00,0x00] +// GFX12XX: v_trunc_f32_e32 v0, lit(0x1) ; encoding: [0xff,0x42,0x00,0x7e,0x01,0x00,0x00,0x00] +// GFX89: v_trunc_f32_e32 v0, lit(0x1) ; encoding: [0xff,0x38,0x00,0x7e,0x01,0x00,0x00,0x00] +// SICI: v_trunc_f32_e32 v0, lit(0x1) ; encoding: [0xff,0x42,0x00,0x7e,0x01,0x00,0x00,0x00] 
v_dot2_bf16_bf16 v5.l, v1, v2, 1 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX11: v_dot2_bf16_bf16 v5.l, v1, v2, 1 ; encoding: [0x05,0x00,0x67,0xd6,0x01,0x05,0x06,0x02] -// NOGFX12: :[[@LINE-4]]:1: error: operands are not valid for this GPU or mode -// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-2]]:1: error: operands are not valid for this GPU or mode +// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_dot2_bf16_bf16 v5.l, v1, v2, lit(1) -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX11: v_dot2_bf16_bf16 v5.l, v1, v2, lit(0x1) ; encoding: [0x05,0x00,0x67,0xd6,0x01,0x05,0xfe,0x03,0x01,0x00,0x00,0x00] -// NOGFX12: :[[@LINE-4]]:1: error: operands are not valid for this GPU or mode -// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-2]]:1: error: operands are not valid for this GPU or mode +// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_dot2_f32_f16 v5, v1, 1, v2 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX11: v_dot2_f32_f16 v5, v1, 1, v2 ; encoding: [0x05,0x40,0x13,0xcc,0x01,0x03,0x09,0x1c] // GFX12: v_dot2_f32_f16 v5, v1, 1, v2 ; encoding: [0x05,0x40,0x13,0xcc,0x01,0x03,0x09,0x1c] -// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_dot2_f32_f16 v5, v1, lit(1), v2 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX11: v_dot2_f32_f16 v5, v1, lit(0x1), v2 ; encoding: [0x05,0x40,0x13,0xcc,0x01,0xff,0x09,0x1c,0x01,0x00,0x00,0x00] // GFX12: v_dot2_f32_f16 v5, v1, lit(0x1), v2 ; encoding: [0x05,0x40,0x13,0xcc,0x01,0xff,0x09,0x1c,0x01,0x00,0x00,0x00] -// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_cvt_pk_fp8_f16 v1.l, 1 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX1250: v_cvt_pk_fp8_f16 v1.l, 1 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00] -// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: 
:[[@LINE-5]]:1: error: instruction not supported on this GPU v_cvt_pk_fp8_f16 v1.l, lit(1) -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX1250: v_cvt_pk_fp8_f16 v1.l, lit(0x1) ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00] -// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU //---------------------------------------------------------------------------// // int literal, expected int operand @@ -755,111 +755,111 @@ s_mov_b64_e32 s[0:1], 0 // SICI: s_mov_b64 s[0:1], 0 ; encoding: [0x80,0x04,0x80,0xbe] v_and_b32_e32 v0, 0, v1 -// SICI: v_and_b32_e32 v0, 0, v1 ; encoding: [0x80,0x02,0x00,0x36] -// GFX89: v_and_b32_e32 v0, 0, v1 ; encoding: [0x80,0x02,0x00,0x26] -// GFX12XX: v_and_b32_e32 v0, 0, v1 ; encoding: [0x80,0x02,0x00,0x36] // GFX11: v_and_b32_e32 v0, 0, v1 ; encoding: [0x80,0x02,0x00,0x36] +// GFX12XX: v_and_b32_e32 v0, 0, v1 ; encoding: [0x80,0x02,0x00,0x36] +// GFX89: v_and_b32_e32 v0, 0, v1 ; encoding: [0x80,0x02,0x00,0x26] +// SICI: v_and_b32_e32 v0, 0, v1 ; encoding: [0x80,0x02,0x00,0x36] v_and_b32_e64 v0, 0, v1 -// SICI: v_and_b32_e64 v0, 0, v1 ; encoding: [0x00,0x00,0x36,0xd2,0x80,0x02,0x02,0x00] -// GFX89: v_and_b32_e64 v0, 0, v1 ; encoding: [0x00,0x00,0x13,0xd1,0x80,0x02,0x02,0x00] -// GFX12XX: v_and_b32_e64 v0, 0, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0x80,0x02,0x02,0x00] // GFX11: v_and_b32_e64 v0, 0, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0x80,0x02,0x02,0x00] +// GFX12XX: v_and_b32_e64 v0, 0, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0x80,0x02,0x02,0x00] +// GFX89: v_and_b32_e64 v0, 0, v1 ; encoding: [0x00,0x00,0x13,0xd1,0x80,0x02,0x02,0x00] +// SICI: v_and_b32_e64 v0, 0, v1 ; encoding: [0x00,0x00,0x36,0xd2,0x80,0x02,0x02,0x00] s_mov_b64_e32 s[0:1], -13 // GFX8PLUS: s_mov_b64 s[0:1], -13 ; encoding: [0xcd,0x01,0x80,0xbe] // SICI: s_mov_b64 s[0:1], -13 ; encoding: [0xcd,0x04,0x80,0xbe] v_and_b32_e32 v0, -13, v1 -// SICI: v_and_b32_e32 v0, -13, v1 ; encoding: [0xcd,0x02,0x00,0x36] -// GFX89: v_and_b32_e32 v0, -13, v1 ; encoding: [0xcd,0x02,0x00,0x26] -// GFX12XX: v_and_b32_e32 v0, -13, v1 ; encoding: [0xcd,0x02,0x00,0x36] // GFX11: v_and_b32_e32 v0, -13, v1 ; encoding: [0xcd,0x02,0x00,0x36] +// GFX12XX: v_and_b32_e32 v0, -13, v1 ; encoding: [0xcd,0x02,0x00,0x36] +// GFX89: v_and_b32_e32 v0, -13, v1 ; encoding: [0xcd,0x02,0x00,0x26] +// SICI: v_and_b32_e32 v0, -13, v1 ; encoding: [0xcd,0x02,0x00,0x36] v_and_b32_e64 v0, -13, v1 -// SICI: v_and_b32_e64 v0, -13, v1 ; encoding: [0x00,0x00,0x36,0xd2,0xcd,0x02,0x02,0x00] -// GFX89: v_and_b32_e64 v0, -13, v1 ; encoding: [0x00,0x00,0x13,0xd1,0xcd,0x02,0x02,0x00] -// GFX12XX: v_and_b32_e64 v0, -13, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xcd,0x02,0x02,0x00] // GFX11: v_and_b32_e64 v0, -13, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xcd,0x02,0x02,0x00] +// GFX12XX: v_and_b32_e64 v0, -13, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xcd,0x02,0x02,0x00] +// GFX89: v_and_b32_e64 v0, -13, v1 ; encoding: [0x00,0x00,0x13,0xd1,0xcd,0x02,0x02,0x00] +// SICI: v_and_b32_e64 v0, -13, v1 ; encoding: [0x00,0x00,0x36,0xd2,0xcd,0x02,0x02,0x00] s_mov_b64_e32 s[0:1], 
35 // GFX8PLUS: s_mov_b64 s[0:1], 35 ; encoding: [0xa3,0x01,0x80,0xbe] // SICI: s_mov_b64 s[0:1], 35 ; encoding: [0xa3,0x04,0x80,0xbe] v_and_b32_e32 v0, 35, v1 -// SICI: v_and_b32_e32 v0, 35, v1 ; encoding: [0xa3,0x02,0x00,0x36] -// GFX89: v_and_b32_e32 v0, 35, v1 ; encoding: [0xa3,0x02,0x00,0x26] -// GFX12XX: v_and_b32_e32 v0, 35, v1 ; encoding: [0xa3,0x02,0x00,0x36] // GFX11: v_and_b32_e32 v0, 35, v1 ; encoding: [0xa3,0x02,0x00,0x36] +// GFX12XX: v_and_b32_e32 v0, 35, v1 ; encoding: [0xa3,0x02,0x00,0x36] +// GFX89: v_and_b32_e32 v0, 35, v1 ; encoding: [0xa3,0x02,0x00,0x26] +// SICI: v_and_b32_e32 v0, 35, v1 ; encoding: [0xa3,0x02,0x00,0x36] v_and_b32_e64 v0, 35, v1 -// SICI: v_and_b32_e64 v0, 35, v1 ; encoding: [0x00,0x00,0x36,0xd2,0xa3,0x02,0x02,0x00] -// GFX89: v_and_b32_e64 v0, 35, v1 ; encoding: [0x00,0x00,0x13,0xd1,0xa3,0x02,0x02,0x00] -// GFX12XX: v_and_b32_e64 v0, 35, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xa3,0x02,0x02,0x00] // GFX11: v_and_b32_e64 v0, 35, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xa3,0x02,0x02,0x00] +// GFX12XX: v_and_b32_e64 v0, 35, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xa3,0x02,0x02,0x00] +// GFX89: v_and_b32_e64 v0, 35, v1 ; encoding: [0x00,0x00,0x13,0xd1,0xa3,0x02,0x02,0x00] +// SICI: v_and_b32_e64 v0, 35, v1 ; encoding: [0x00,0x00,0x36,0xd2,0xa3,0x02,0x02,0x00] s_mov_b64_e32 s[0:1], 1234 // GFX8PLUS: s_mov_b64 s[0:1], 0x4d2 ; encoding: [0xff,0x01,0x80,0xbe,0xd2,0x04,0x00,0x00] // SICI: s_mov_b64 s[0:1], 0x4d2 ; encoding: [0xff,0x04,0x80,0xbe,0xd2,0x04,0x00,0x00] v_and_b32_e32 v0, 1234, v1 -// SICI: v_and_b32_e32 v0, 0x4d2, v1 ; encoding: [0xff,0x02,0x00,0x36,0xd2,0x04,0x00,0x00] -// GFX89: v_and_b32_e32 v0, 0x4d2, v1 ; encoding: [0xff,0x02,0x00,0x26,0xd2,0x04,0x00,0x00] -// GFX12XX: v_and_b32_e32 v0, 0x4d2, v1 ; encoding: [0xff,0x02,0x00,0x36,0xd2,0x04,0x00,0x00] // GFX11: v_and_b32_e32 v0, 0x4d2, v1 ; encoding: [0xff,0x02,0x00,0x36,0xd2,0x04,0x00,0x00] +// GFX12XX: v_and_b32_e32 v0, 0x4d2, v1 ; encoding: [0xff,0x02,0x00,0x36,0xd2,0x04,0x00,0x00] +// GFX89: v_and_b32_e32 v0, 0x4d2, v1 ; encoding: [0xff,0x02,0x00,0x26,0xd2,0x04,0x00,0x00] +// SICI: v_and_b32_e32 v0, 0x4d2, v1 ; encoding: [0xff,0x02,0x00,0x36,0xd2,0x04,0x00,0x00] v_and_b32_e64 v0, 1234, v1 +// GFX11: v_and_b32_e64 v0, 0x4d2, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xff,0x02,0x02,0x00,0xd2,0x04,0x00,0x00] // GFX12XX: v_and_b32_e64 v0, 0x4d2, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xff,0x02,0x02,0x00,0xd2,0x04,0x00,0x00] -// NOSICI: :[[@LINE-2]]:19: error: literal operands are not supported // NOGFX89: :[[@LINE-3]]:19: error: literal operands are not supported -// GFX11: v_and_b32_e64 v0, 0x4d2, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xff,0x02,0x02,0x00,0xd2,0x04,0x00,0x00] +// NOSICI: :[[@LINE-4]]:19: error: literal operands are not supported // NOSICIVI: :[[@LINE-1]]:19: error: literal operands are not supported s_mov_b64_e32 s[0:1], -54321 -// SICI: s_mov_b64 s[0:1], 0xffff2bcf ; encoding: [0xff,0x04,0x80,0xbe,0xcf,0x2b,0xff,0xff] -// GFX89: s_mov_b64 s[0:1], 0xffff2bcf ; encoding: [0xff,0x01,0x80,0xbe,0xcf,0x2b,0xff,0xff] // GFX11: s_mov_b64 s[0:1], 0xffff2bcf ; encoding: [0xff,0x01,0x80,0xbe,0xcf,0x2b,0xff,0xff] // GFX12: s_mov_b64 s[0:1], 0xffff2bcf ; encoding: [0xff,0x01,0x80,0xbe,0xcf,0x2b,0xff,0xff] // GFX1250: s_mov_b64 s[0:1], 0xffffffffffff2bcf ; encoding: [0xfe,0x01,0x80,0xbe,0xcf,0x2b,0xff,0xff,0xff,0xff,0xff,0xff] +// GFX89: s_mov_b64 s[0:1], 0xffff2bcf ; encoding: [0xff,0x01,0x80,0xbe,0xcf,0x2b,0xff,0xff] +// SICI: s_mov_b64 s[0:1], 0xffff2bcf ; encoding: [0xff,0x04,0x80,0xbe,0xcf,0x2b,0xff,0xff] 
v_and_b32_e32 v0, -54321, v1 -// SICI: v_and_b32_e32 v0, 0xffff2bcf, v1 ; encoding: [0xff,0x02,0x00,0x36,0xcf,0x2b,0xff,0xff] -// GFX89: v_and_b32_e32 v0, 0xffff2bcf, v1 ; encoding: [0xff,0x02,0x00,0x26,0xcf,0x2b,0xff,0xff] -// GFX12XX: v_and_b32_e32 v0, 0xffff2bcf, v1 ; encoding: [0xff,0x02,0x00,0x36,0xcf,0x2b,0xff,0xff] // GFX11: v_and_b32_e32 v0, 0xffff2bcf, v1 ; encoding: [0xff,0x02,0x00,0x36,0xcf,0x2b,0xff,0xff] +// GFX12XX: v_and_b32_e32 v0, 0xffff2bcf, v1 ; encoding: [0xff,0x02,0x00,0x36,0xcf,0x2b,0xff,0xff] +// GFX89: v_and_b32_e32 v0, 0xffff2bcf, v1 ; encoding: [0xff,0x02,0x00,0x26,0xcf,0x2b,0xff,0xff] +// SICI: v_and_b32_e32 v0, 0xffff2bcf, v1 ; encoding: [0xff,0x02,0x00,0x36,0xcf,0x2b,0xff,0xff] s_mov_b64_e32 s[0:1], 0xdeadbeef -// SICI: s_mov_b64 s[0:1], 0xdeadbeef ; encoding: [0xff,0x04,0x80,0xbe,0xef,0xbe,0xad,0xde] -// GFX89: s_mov_b64 s[0:1], 0xdeadbeef ; encoding: [0xff,0x01,0x80,0xbe,0xef,0xbe,0xad,0xde] // GFX11: s_mov_b64 s[0:1], 0xdeadbeef ; encoding: [0xff,0x01,0x80,0xbe,0xef,0xbe,0xad,0xde] // GFX12: s_mov_b64 s[0:1], 0xdeadbeef ; encoding: [0xff,0x01,0x80,0xbe,0xef,0xbe,0xad,0xde] // GFX1250: s_mov_b64 s[0:1], 0xdeadbeef ; encoding: [0xfe,0x01,0x80,0xbe,0xef,0xbe,0xad,0xde,0x00,0x00,0x00,0x00] +// GFX89: s_mov_b64 s[0:1], 0xdeadbeef ; encoding: [0xff,0x01,0x80,0xbe,0xef,0xbe,0xad,0xde] +// SICI: s_mov_b64 s[0:1], 0xdeadbeef ; encoding: [0xff,0x04,0x80,0xbe,0xef,0xbe,0xad,0xde] v_and_b32_e32 v0, 0xdeadbeef, v1 -// SICI: v_and_b32_e32 v0, 0xdeadbeef, v1 ; encoding: [0xff,0x02,0x00,0x36,0xef,0xbe,0xad,0xde] -// GFX89: v_and_b32_e32 v0, 0xdeadbeef, v1 ; encoding: [0xff,0x02,0x00,0x26,0xef,0xbe,0xad,0xde] -// GFX12XX: v_and_b32_e32 v0, 0xdeadbeef, v1 ; encoding: [0xff,0x02,0x00,0x36,0xef,0xbe,0xad,0xde] // GFX11: v_and_b32_e32 v0, 0xdeadbeef, v1 ; encoding: [0xff,0x02,0x00,0x36,0xef,0xbe,0xad,0xde] +// GFX12XX: v_and_b32_e32 v0, 0xdeadbeef, v1 ; encoding: [0xff,0x02,0x00,0x36,0xef,0xbe,0xad,0xde] +// GFX89: v_and_b32_e32 v0, 0xdeadbeef, v1 ; encoding: [0xff,0x02,0x00,0x26,0xef,0xbe,0xad,0xde] +// SICI: v_and_b32_e32 v0, 0xdeadbeef, v1 ; encoding: [0xff,0x02,0x00,0x36,0xef,0xbe,0xad,0xde] s_mov_b64_e32 s[0:1], 0xffffffff -// SICI: s_mov_b64 s[0:1], 0xffffffff ; encoding: [0xff,0x04,0x80,0xbe,0xff,0xff,0xff,0xff] -// GFX89: s_mov_b64 s[0:1], 0xffffffff ; encoding: [0xff,0x01,0x80,0xbe,0xff,0xff,0xff,0xff] // GFX11: s_mov_b64 s[0:1], 0xffffffff ; encoding: [0xff,0x01,0x80,0xbe,0xff,0xff,0xff,0xff] // GFX12: s_mov_b64 s[0:1], 0xffffffff ; encoding: [0xff,0x01,0x80,0xbe,0xff,0xff,0xff,0xff] // GFX1250: s_mov_b64 s[0:1], 0xffffffff ; encoding: [0xfe,0x01,0x80,0xbe,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00] +// GFX89: s_mov_b64 s[0:1], 0xffffffff ; encoding: [0xff,0x01,0x80,0xbe,0xff,0xff,0xff,0xff] +// SICI: s_mov_b64 s[0:1], 0xffffffff ; encoding: [0xff,0x04,0x80,0xbe,0xff,0xff,0xff,0xff] v_and_b32_e32 v0, 0xffffffff, v1 -// SICI: v_and_b32_e32 v0, -1, v1 ; encoding: [0xc1,0x02,0x00,0x36] -// GFX89: v_and_b32_e32 v0, -1, v1 ; encoding: [0xc1,0x02,0x00,0x26] -// GFX12XX: v_and_b32_e32 v0, -1, v1 ; encoding: [0xc1,0x02,0x00,0x36] // GFX11: v_and_b32_e32 v0, -1, v1 ; encoding: [0xc1,0x02,0x00,0x36] +// GFX12XX: v_and_b32_e32 v0, -1, v1 ; encoding: [0xc1,0x02,0x00,0x36] +// GFX89: v_and_b32_e32 v0, -1, v1 ; encoding: [0xc1,0x02,0x00,0x26] +// SICI: v_and_b32_e32 v0, -1, v1 ; encoding: [0xc1,0x02,0x00,0x36] s_mov_b64_e32 s[0:1], 0x123456789abcdef0 -// NOSICI: :[[@LINE-1]]:23: error: invalid operand for instruction -// NOGFX89: :[[@LINE-2]]:23: error: invalid operand for instruction 
// GFX1250: s_mov_b64 s[0:1], 0x123456789abcdef0 ; encoding: [0xfe,0x01,0x80,0xbe,0xf0,0xde,0xbc,0x9a,0x78,0x56,0x34,0x12] -// NOGFX11: :[[@LINE-4]]:23: error: invalid operand for instruction -// NOGFX12: :[[@LINE-5]]:23: error: invalid operand for instruction +// NOGFX11: :[[@LINE-2]]:23: error: invalid operand for instruction +// NOGFX12: :[[@LINE-3]]:23: error: invalid operand for instruction +// NOGFX89: :[[@LINE-4]]:23: error: invalid operand for instruction +// NOSICI: :[[@LINE-5]]:23: error: invalid operand for instruction // NOSICIVI: :[[@LINE-1]]:23: error: invalid operand for instruction v_and_b32_e32 v0, 0x123456789abcdef0, v1 @@ -870,75 +870,75 @@ s_mov_b64_e32 s[0:1], 0xffffffffffffffff // SICI: s_mov_b64 s[0:1], -1 ; encoding: [0xc1,0x04,0x80,0xbe] v_and_b32_e32 v0, 0xffffffffffffffff, v1 -// SICI: v_and_b32_e32 v0, -1, v1 ; encoding: [0xc1,0x02,0x00,0x36] -// GFX89: v_and_b32_e32 v0, -1, v1 ; encoding: [0xc1,0x02,0x00,0x26] -// GFX12XX: v_and_b32_e32 v0, -1, v1 ; encoding: [0xc1,0x02,0x00,0x36] // GFX11: v_and_b32_e32 v0, -1, v1 ; encoding: [0xc1,0x02,0x00,0x36] +// GFX12XX: v_and_b32_e32 v0, -1, v1 ; encoding: [0xc1,0x02,0x00,0x36] +// GFX89: v_and_b32_e32 v0, -1, v1 ; encoding: [0xc1,0x02,0x00,0x26] +// SICI: v_and_b32_e32 v0, -1, v1 ; encoding: [0xc1,0x02,0x00,0x36] v_not_b16 v5.l, 1 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX11: v_not_b16_e32 v5.l, 1 ; encoding: [0x81,0xd2,0x0a,0x7e] // GFX1250: v_not_b16_e32 v5.l, 1 ; encoding: [0x81,0xd2,0x0a,0x7e] -// NOGFX12: :[[@LINE-5]]:1: error: operands are not valid for this GPU or mode +// NOGFX12: :[[@LINE-3]]:1: error: operands are not valid for this GPU or mode +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_not_b16 v5.l, lit(1) -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX11: v_not_b16_e32 v5.l, lit(0x1) ; encoding: [0xff,0xd2,0x0a,0x7e,0x01,0x00,0x00,0x00] // GFX1250: v_not_b16_e32 v5.l, lit(0x1) ; encoding: [0xff,0xd2,0x0a,0x7e,0x01,0x00,0x00,0x00] -// NOGFX12: :[[@LINE-5]]:1: error: operands are not valid for this GPU or mode +// NOGFX12: :[[@LINE-3]]:1: error: operands are not valid for this GPU or mode +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU s_mov_b64 s[0:1], 1 // GFX8PLUS: s_mov_b64 s[0:1], 1 ; encoding: [0x81,0x01,0x80,0xbe] // SICI: s_mov_b64 s[0:1], 1 ; encoding: [0x81,0x04,0x80,0xbe] s_mov_b64 s[0:1], lit(1) -// SICI: s_mov_b64 s[0:1], lit(0x1) ; encoding: [0xff,0x04,0x80,0xbe,0x01,0x00,0x00,0x00] -// GFX89: s_mov_b64 s[0:1], lit(0x1) ; encoding: [0xff,0x01,0x80,0xbe,0x01,0x00,0x00,0x00] // GFX11: s_mov_b64 s[0:1], lit(0x1) ; encoding: [0xff,0x01,0x80,0xbe,0x01,0x00,0x00,0x00] // GFX12: s_mov_b64 s[0:1], lit(0x1) ; encoding: [0xff,0x01,0x80,0xbe,0x01,0x00,0x00,0x00] // GFX1250: s_mov_b64 s[0:1], lit(0x1) ; encoding: [0xfe,0x01,0x80,0xbe,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00] +// GFX89: s_mov_b64 s[0:1], lit(0x1) ; encoding: [0xff,0x01,0x80,0xbe,0x01,0x00,0x00,0x00] +// SICI: s_mov_b64 s[0:1], lit(0x1) ; encoding: [0xff,0x04,0x80,0xbe,0x01,0x00,0x00,0x00] v_and_b32_e32 v0, 1, v1 -// SICI: v_and_b32_e32 v0, 1, v1 ; encoding: [0x81,0x02,0x00,0x36] -// GFX89: v_and_b32_e32 v0, 1, v1 ; encoding: 
[0x81,0x02,0x00,0x26] -// GFX12XX: v_and_b32_e32 v0, 1, v1 ; encoding: [0x81,0x02,0x00,0x36] // GFX11: v_and_b32_e32 v0, 1, v1 ; encoding: [0x81,0x02,0x00,0x36] +// GFX12XX: v_and_b32_e32 v0, 1, v1 ; encoding: [0x81,0x02,0x00,0x36] +// GFX89: v_and_b32_e32 v0, 1, v1 ; encoding: [0x81,0x02,0x00,0x26] +// SICI: v_and_b32_e32 v0, 1, v1 ; encoding: [0x81,0x02,0x00,0x36] v_and_b32_e32 v0, lit(1), v1 -// SICI: v_and_b32_e32 v0, lit(0x1), v1 ; encoding: [0xff,0x02,0x00,0x36,0x01,0x00,0x00,0x00] -// GFX89: v_and_b32_e32 v0, lit(0x1), v1 ; encoding: [0xff,0x02,0x00,0x26,0x01,0x00,0x00,0x00] -// GFX12XX: v_and_b32_e32 v0, lit(0x1), v1 ; encoding: [0xff,0x02,0x00,0x36,0x01,0x00,0x00,0x00] // GFX11: v_and_b32_e32 v0, lit(0x1), v1 ; encoding: [0xff,0x02,0x00,0x36,0x01,0x00,0x00,0x00] +// GFX12XX: v_and_b32_e32 v0, lit(0x1), v1 ; encoding: [0xff,0x02,0x00,0x36,0x01,0x00,0x00,0x00] +// GFX89: v_and_b32_e32 v0, lit(0x1), v1 ; encoding: [0xff,0x02,0x00,0x26,0x01,0x00,0x00,0x00] +// SICI: v_and_b32_e32 v0, lit(0x1), v1 ; encoding: [0xff,0x02,0x00,0x36,0x01,0x00,0x00,0x00] v_pk_add_u16 v5, exec_lo, 1 +// GFX11: v_pk_add_u16 v5, exec_lo, 1 ; encoding: [0x05,0x40,0x0a,0xcc,0x7e,0x02,0x01,0x18] // GFX12XX: v_pk_add_u16 v5, exec_lo, 1 ; encoding: [0x05,0x40,0x0a,0xcc,0x7e,0x02,0x01,0x18] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX9: v_pk_add_u16 v5, exec_lo, 1 ; encoding: [0x05,0x40,0x8a,0xd3,0x7e,0x02,0x01,0x18] -// GFX11: v_pk_add_u16 v5, exec_lo, 1 ; encoding: [0x05,0x40,0x0a,0xcc,0x7e,0x02,0x01,0x18] +// NOSICI: :[[@LINE-4]]:1: error: instruction not supported on this GPU // NOVI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_pk_add_u16 v5, exec_lo, lit(1) -// GFX12XX: v_pk_add_u16 v5, exec_lo, lit(0x1) ; encoding: [0x05,0x40,0x0a,0xcc,0x7e,0xfe,0x01,0x18,0x01,0x00,0x00,0x00] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX11: v_pk_add_u16 v5, exec_lo, lit(0x1) ; encoding: [0x05,0x40,0x0a,0xcc,0x7e,0xfe,0x01,0x18,0x01,0x00,0x00,0x00] -// NOVI: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX9: :[[@LINE-5]]:31: error: invalid operand (violates constant bus restrictions) +// GFX12XX: v_pk_add_u16 v5, exec_lo, lit(0x1) ; encoding: [0x05,0x40,0x0a,0xcc,0x7e,0xfe,0x01,0x18,0x01,0x00,0x00,0x00] +// NOGFX9: :[[@LINE-3]]:31: error: invalid operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_perm_pk16_b6_u4 v[2:4], v4, v[4:5], 1 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX1250: v_perm_pk16_b6_u4 v[2:4], v4, v[4:5], 1 ; encoding: [0x02,0x00,0x42,0xd6,0x04,0x09,0x06,0x02] -// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU v_perm_pk16_b6_u4 v[2:4], v4, v[4:5], lit(1) -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX1250: v_perm_pk16_b6_u4 v[2:4], v4, v[4:5], lit(0x1) ; encoding: 
[0x02,0x00,0x42,0xd6,0x04,0x09,0xfe,0x03,0x01,0x00,0x00,0x00] -// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU //---------------------------------------------------------------------------// // 1/(2*PI) @@ -948,46 +948,46 @@ v_trunc_f32_e32 v0, 0x3fc45f306dc9c882 // NOGCN: :[[@LINE-1]]:21: error: invalid operand for instruction v_fract_f64_e32 v[0:1], 0x3fc45f306dc9c882 -// GFX89: v_fract_f64_e32 v[0:1], 0.15915494309189532 ; encoding: [0xf8,0x64,0x00,0x7e] -// GFX12XX: v_fract_f64_e32 v[0:1], 0.15915494309189532 ; encoding: [0xf8,0x7c,0x00,0x7e] -// NOSICI: :[[@LINE-3]]:25: error: invalid operand for instruction // GFX11: v_fract_f64_e32 v[0:1], 0.15915494309189532 ; encoding: [0xf8,0x7c,0x00,0x7e] +// GFX12XX: v_fract_f64_e32 v[0:1], 0.15915494309189532 ; encoding: [0xf8,0x7c,0x00,0x7e] +// GFX89: v_fract_f64_e32 v[0:1], 0.15915494309189532 ; encoding: [0xf8,0x64,0x00,0x7e] +// NOSICI: :[[@LINE-4]]:25: error: invalid operand for instruction // NOSICIVI: :[[@LINE-2]]:25: error: invalid operand for instruction v_trunc_f32_e32 v0, 0x3e22f983 -// SICI: v_trunc_f32_e32 v0, 0x3e22f983 ; encoding: [0xff,0x42,0x00,0x7e,0x83,0xf9,0x22,0x3e] -// GFX89: v_trunc_f32_e32 v0, 0.15915494 ; encoding: [0xf8,0x38,0x00,0x7e] -// GFX12XX: v_trunc_f32_e32 v0, 0.15915494 ; encoding: [0xf8,0x42,0x00,0x7e] // GFX11: v_trunc_f32_e32 v0, 0.15915494 ; encoding: [0xf8,0x42,0x00,0x7e] +// GFX12XX: v_trunc_f32_e32 v0, 0.15915494 ; encoding: [0xf8,0x42,0x00,0x7e] +// GFX89: v_trunc_f32_e32 v0, 0.15915494 ; encoding: [0xf8,0x38,0x00,0x7e] +// SICI: v_trunc_f32_e32 v0, 0x3e22f983 ; encoding: [0xff,0x42,0x00,0x7e,0x83,0xf9,0x22,0x3e] v_fract_f64_e32 v[0:1], 0x3e22f983 -// SICI: v_fract_f64_e32 v[0:1], 0x3e22f983 ; encoding: [0xff,0x7c,0x00,0x7e,0x83,0xf9,0x22,0x3e] -// GFX89: v_fract_f64_e32 v[0:1], 0x3e22f983 ; encoding: [0xff,0x64,0x00,0x7e,0x83,0xf9,0x22,0x3e] -// GFX12XX: v_fract_f64_e32 v[0:1], 0x3e22f983 ; encoding: [0xff,0x7c,0x00,0x7e,0x83,0xf9,0x22,0x3e] // GFX11: v_fract_f64_e32 v[0:1], 0x3e22f983 ; encoding: [0xff,0x7c,0x00,0x7e,0x83,0xf9,0x22,0x3e] +// GFX12XX: v_fract_f64_e32 v[0:1], 0x3e22f983 ; encoding: [0xff,0x7c,0x00,0x7e,0x83,0xf9,0x22,0x3e] +// GFX89: v_fract_f64_e32 v[0:1], 0x3e22f983 ; encoding: [0xff,0x64,0x00,0x7e,0x83,0xf9,0x22,0x3e] +// SICI: v_fract_f64_e32 v[0:1], 0x3e22f983 ; encoding: [0xff,0x7c,0x00,0x7e,0x83,0xf9,0x22,0x3e] v_trunc_f32_e64 v0, 0x3fc45f306dc9c882 // NOGCN: :[[@LINE-1]]:21: error: invalid operand for instruction v_fract_f64_e64 v[0:1], 0x3fc45f306dc9c882 -// GFX89: v_fract_f64_e64 v[0:1], 0.15915494309189532 ; encoding: [0x00,0x00,0x72,0xd1,0xf8,0x00,0x00,0x00] -// GFX12XX: v_fract_f64_e64 v[0:1], 0.15915494309189532 ; encoding: [0x00,0x00,0xbe,0xd5,0xf8,0x00,0x00,0x00] -// NOSICI: :[[@LINE-3]]:25: error: invalid operand for instruction // GFX11: v_fract_f64_e64 v[0:1], 0.15915494309189532 ; encoding: [0x00,0x00,0xbe,0xd5,0xf8,0x00,0x00,0x00] +// GFX12XX: v_fract_f64_e64 v[0:1], 0.15915494309189532 ; encoding: [0x00,0x00,0xbe,0xd5,0xf8,0x00,0x00,0x00] +// GFX89: v_fract_f64_e64 v[0:1], 0.15915494309189532 ; encoding: [0x00,0x00,0x72,0xd1,0xf8,0x00,0x00,0x00] +// NOSICI: :[[@LINE-4]]:25: 
error: invalid operand for instruction // NOSICIVI: :[[@LINE-2]]:25: error: invalid operand for instruction v_trunc_f32_e64 v0, 0x3e22f983 -// GFX89: v_trunc_f32_e64 v0, 0.15915494 ; encoding: [0x00,0x00,0x5c,0xd1,0xf8,0x00,0x00,0x00] -// GFX12XX: v_trunc_f32_e64 v0, 0.15915494 ; encoding: [0x00,0x00,0xa1,0xd5,0xf8,0x00,0x00,0x00] -// NOSICI: :[[@LINE-3]]:21: error: literal operands are not supported // GFX11: v_trunc_f32_e64 v0, 0.15915494 ; encoding: [0x00,0x00,0xa1,0xd5,0xf8,0x00,0x00,0x00] +// GFX12XX: v_trunc_f32_e64 v0, 0.15915494 ; encoding: [0x00,0x00,0xa1,0xd5,0xf8,0x00,0x00,0x00] +// GFX89: v_trunc_f32_e64 v0, 0.15915494 ; encoding: [0x00,0x00,0x5c,0xd1,0xf8,0x00,0x00,0x00] +// NOSICI: :[[@LINE-4]]:21: error: literal operands are not supported // NOSICIVI: :[[@LINE-2]]:21: error: literal operands are not supported v_fract_f64_e64 v[0:1], 0x3e22f983 +// GFX11: v_fract_f64_e64 v[0:1], 0x3e22f983 ; encoding: [0x00,0x00,0xbe,0xd5,0xff,0x00,0x00,0x00,0x83,0xf9,0x22,0x3e] // GFX12XX: v_fract_f64_e64 v[0:1], 0x3e22f983 ; encoding: [0x00,0x00,0xbe,0xd5,0xff,0x00,0x00,0x00,0x83,0xf9,0x22,0x3e] -// NOSICI: :[[@LINE-2]]:25: error: literal operands are not supported // NOGFX89: :[[@LINE-3]]:25: error: literal operands are not supported -// GFX11: v_fract_f64_e64 v[0:1], 0x3e22f983 ; encoding: [0x00,0x00,0xbe,0xd5,0xff,0x00,0x00,0x00,0x83,0xf9,0x22,0x3e] +// NOSICI: :[[@LINE-4]]:25: error: literal operands are not supported // NOSICIVI: :[[@LINE-1]]:25: error: literal operands are not supported s_mov_b64_e32 s[0:1], 0.159154943091895317852646485335 @@ -996,37 +996,37 @@ s_mov_b64_e32 s[0:1], 0.159154943091895317852646485335 // NOSICIVI: :[[@LINE-2]]:23: error: invalid operand for instruction v_and_b32_e32 v0, 0.159154943091895317852646485335, v1 -// SICI: v_and_b32_e32 v0, 0x3e22f983, v1 ; encoding: [0xff,0x02,0x00,0x36,0x83,0xf9,0x22,0x3e] -// GFX89: v_and_b32_e32 v0, 0.15915494, v1 ; encoding: [0xf8,0x02,0x00,0x26] -// GFX12XX: v_and_b32_e32 v0, 0.15915494, v1 ; encoding: [0xf8,0x02,0x00,0x36] // GFX11: v_and_b32_e32 v0, 0.15915494, v1 ; encoding: [0xf8,0x02,0x00,0x36] +// GFX12XX: v_and_b32_e32 v0, 0.15915494, v1 ; encoding: [0xf8,0x02,0x00,0x36] +// GFX89: v_and_b32_e32 v0, 0.15915494, v1 ; encoding: [0xf8,0x02,0x00,0x26] +// SICI: v_and_b32_e32 v0, 0x3e22f983, v1 ; encoding: [0xff,0x02,0x00,0x36,0x83,0xf9,0x22,0x3e] v_and_b32_e64 v0, 0.159154943091895317852646485335, v1 -// GFX89: v_and_b32_e64 v0, 0.15915494, v1 ; encoding: [0x00,0x00,0x13,0xd1,0xf8,0x02,0x02,0x00] -// GFX12XX: v_and_b32_e64 v0, 0.15915494, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xf8,0x02,0x02,0x00] -// NOSICI: :[[@LINE-3]]:19: error: literal operands are not supported // GFX11: v_and_b32_e64 v0, 0.15915494, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xf8,0x02,0x02,0x00] +// GFX12XX: v_and_b32_e64 v0, 0.15915494, v1 ; encoding: [0x00,0x00,0x1b,0xd5,0xf8,0x02,0x02,0x00] +// GFX89: v_and_b32_e64 v0, 0.15915494, v1 ; encoding: [0x00,0x00,0x13,0xd1,0xf8,0x02,0x02,0x00] +// NOSICI: :[[@LINE-4]]:19: error: literal operands are not supported // NOSICIVI: :[[@LINE-2]]:19: error: literal operands are not supported v_fract_f64 v[0:1], 0.159154943091895317852646485335 -// SICI: v_fract_f64_e32 v[0:1], 0x3fc45f30 ; encoding: [0xff,0x7c,0x00,0x7e,0x30,0x5f,0xc4,0x3f] -// GFX89: v_fract_f64_e32 v[0:1], 0.15915494309189532 ; encoding: [0xf8,0x64,0x00,0x7e] +// GFX11: v_fract_f64_e32 v[0:1], 0.15915494309189532 ; encoding: [0xf8,0x7c,0x00,0x7e] // GFX12XX: v_fract_f64_e32 v[0:1], 0.15915494309189532 ; encoding: [0xf8,0x7c,0x00,0x7e] +// GFX89: 
v_fract_f64_e32 v[0:1], 0.15915494309189532 ; encoding: [0xf8,0x64,0x00,0x7e] // NOSICI: :[[@LINE-4]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero -// GFX11: v_fract_f64_e32 v[0:1], 0.15915494309189532 ; encoding: [0xf8,0x7c,0x00,0x7e] +// SICI: v_fract_f64_e32 v[0:1], 0x3fc45f30 ; encoding: [0xff,0x7c,0x00,0x7e,0x30,0x5f,0xc4,0x3f] // NOSICIVI: :[[@LINE-3]]:1: warning: Can't encode literal as exact 64-bit floating-point operand. Low 32-bits will be set to zero v_trunc_f32 v0, 0.159154943091895317852646485335 -// SICI: v_trunc_f32_e32 v0, 0x3e22f983 ; encoding: [0xff,0x42,0x00,0x7e,0x83,0xf9,0x22,0x3e] -// GFX89: v_trunc_f32_e32 v0, 0.15915494 ; encoding: [0xf8,0x38,0x00,0x7e] -// GFX12XX: v_trunc_f32_e32 v0, 0.15915494 ; encoding: [0xf8,0x42,0x00,0x7e] // GFX11: v_trunc_f32_e32 v0, 0.15915494 ; encoding: [0xf8,0x42,0x00,0x7e] +// GFX12XX: v_trunc_f32_e32 v0, 0.15915494 ; encoding: [0xf8,0x42,0x00,0x7e] +// GFX89: v_trunc_f32_e32 v0, 0.15915494 ; encoding: [0xf8,0x38,0x00,0x7e] +// SICI: v_trunc_f32_e32 v0, 0x3e22f983 ; encoding: [0xff,0x42,0x00,0x7e,0x83,0xf9,0x22,0x3e] v_trunc_f32 v0, lit(0.159154943091895317852646485335) -// SICI: v_trunc_f32_e32 v0, lit(0x3e22f983) ; encoding: [0xff,0x42,0x00,0x7e,0x83,0xf9,0x22,0x3e] -// GFX89: v_trunc_f32_e32 v0, lit(0x3e22f983) ; encoding: [0xff,0x38,0x00,0x7e,0x83,0xf9,0x22,0x3e] -// GFX12XX: v_trunc_f32_e32 v0, lit(0x3e22f983) ; encoding: [0xff,0x42,0x00,0x7e,0x83,0xf9,0x22,0x3e] // GFX11: v_trunc_f32_e32 v0, lit(0x3e22f983) ; encoding: [0xff,0x42,0x00,0x7e,0x83,0xf9,0x22,0x3e] +// GFX12XX: v_trunc_f32_e32 v0, lit(0x3e22f983) ; encoding: [0xff,0x42,0x00,0x7e,0x83,0xf9,0x22,0x3e] +// GFX89: v_trunc_f32_e32 v0, lit(0x3e22f983) ; encoding: [0xff,0x38,0x00,0x7e,0x83,0xf9,0x22,0x3e] +// SICI: v_trunc_f32_e32 v0, lit(0x3e22f983) ; encoding: [0xff,0x42,0x00,0x7e,0x83,0xf9,0x22,0x3e] //---------------------------------------------------------------------------// // integer literal truncation checks @@ -1051,54 +1051,54 @@ v_trunc_f32 v0, 0x1fffffff000 // NOGCN: :[[@LINE-1]]:17: error: invalid operand for instruction s_mov_b64 s[0:1], 0x101ffffffff -// NOSICI: :[[@LINE-1]]:19: error: invalid operand for instruction -// NOGFX89: :[[@LINE-2]]:19: error: invalid operand for instruction // GFX1250: s_mov_b64 s[0:1], 0x101ffffffff ; encoding: [0xfe,0x01,0x80,0xbe,0xff,0xff,0xff,0xff,0x01,0x01,0x00,0x00] -// NOGFX11: :[[@LINE-4]]:19: error: invalid operand for instruction -// NOGFX12: :[[@LINE-5]]:19: error: invalid operand for instruction +// NOGFX11: :[[@LINE-2]]:19: error: invalid operand for instruction +// NOGFX12: :[[@LINE-3]]:19: error: invalid operand for instruction +// NOGFX89: :[[@LINE-4]]:19: error: invalid operand for instruction +// NOSICI: :[[@LINE-5]]:19: error: invalid operand for instruction // NOSICIVI: :[[@LINE-1]]:19: error: invalid operand for instruction s_mov_b64 s[0:1], 0x1000000001 -// NOSICI: :[[@LINE-1]]:19: error: invalid operand for instruction -// NOGFX89: :[[@LINE-2]]:19: error: invalid operand for instruction // GFX1250: s_mov_b64 s[0:1], 0x1000000001 ; encoding: [0xfe,0x01,0x80,0xbe,0x01,0x00,0x00,0x00,0x10,0x00,0x00,0x00] -// NOGFX11: :[[@LINE-4]]:19: error: invalid operand for instruction -// NOGFX12: :[[@LINE-5]]:19: error: invalid operand for instruction +// NOGFX11: :[[@LINE-2]]:19: error: invalid operand for instruction +// NOGFX12: :[[@LINE-3]]:19: error: invalid operand for instruction +// NOGFX89: :[[@LINE-4]]:19: error: invalid operand for instruction +// 
NOSICI: :[[@LINE-5]]:19: error: invalid operand for instruction // NOSICIVI: :[[@LINE-1]]:19: error: invalid operand for instruction s_mov_b64 s[0:1], 0x1000000fff -// NOSICI: :[[@LINE-1]]:19: error: invalid operand for instruction -// NOGFX89: :[[@LINE-2]]:19: error: invalid operand for instruction // GFX1250: s_mov_b64 s[0:1], 0x1000000fff ; encoding: [0xfe,0x01,0x80,0xbe,0xff,0x0f,0x00,0x00,0x10,0x00,0x00,0x00] -// NOGFX11: :[[@LINE-4]]:19: error: invalid operand for instruction -// NOGFX12: :[[@LINE-5]]:19: error: invalid operand for instruction +// NOGFX11: :[[@LINE-2]]:19: error: invalid operand for instruction +// NOGFX12: :[[@LINE-3]]:19: error: invalid operand for instruction +// NOGFX89: :[[@LINE-4]]:19: error: invalid operand for instruction +// NOSICI: :[[@LINE-5]]:19: error: invalid operand for instruction // NOSICIVI: :[[@LINE-1]]:19: error: invalid operand for instruction v_trunc_f64 v[0:1], 0x1fffffffff0 -// NOGFX89: :[[@LINE-1]]:21: error: invalid operand for instruction // GFX1250: v_trunc_f64_e32 v[0:1], 0x1fffffffff0 ; encoding: [0xfe,0x2e,0x00,0x7e,0xf0,0xff,0xff,0xff,0xff,0x01,0x00,0x00] -// NOSI: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOCI: :[[@LINE-4]]:21: error: invalid operand for instruction -// NOGFX11: :[[@LINE-5]]:21: error: invalid operand for instruction -// NOGFX12: :[[@LINE-6]]:21: error: invalid operand for instruction +// NOCI: :[[@LINE-2]]:21: error: invalid operand for instruction +// NOGFX11: :[[@LINE-3]]:21: error: invalid operand for instruction +// NOGFX12: :[[@LINE-4]]:21: error: invalid operand for instruction +// NOGFX89: :[[@LINE-5]]:21: error: invalid operand for instruction +// NOSI: :[[@LINE-6]]:1: error: instruction not supported on this GPU // NOCIVI: :[[@LINE-4]]:21: error: invalid operand for instruction v_trunc_f64 v[0:1], 0x100000001 -// NOGFX89: :[[@LINE-1]]:21: error: invalid operand for instruction // GFX1250: v_trunc_f64_e32 v[0:1], 0x100000001 ; encoding: [0xfe,0x2e,0x00,0x7e,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00] -// NOSI: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOCI: :[[@LINE-4]]:21: error: invalid operand for instruction -// NOGFX11: :[[@LINE-5]]:21: error: invalid operand for instruction -// NOGFX12: :[[@LINE-6]]:21: error: invalid operand for instruction +// NOCI: :[[@LINE-2]]:21: error: invalid operand for instruction +// NOGFX11: :[[@LINE-3]]:21: error: invalid operand for instruction +// NOGFX12: :[[@LINE-4]]:21: error: invalid operand for instruction +// NOGFX89: :[[@LINE-5]]:21: error: invalid operand for instruction +// NOSI: :[[@LINE-6]]:1: error: instruction not supported on this GPU // NOCIVI: :[[@LINE-4]]:21: error: invalid operand for instruction v_trunc_f64 v[0:1], 0x1fffffff000 -// NOGFX89: :[[@LINE-1]]:21: error: invalid operand for instruction // GFX1250: v_trunc_f64_e32 v[0:1], 0x1fffffff000 ; encoding: [0xfe,0x2e,0x00,0x7e,0x00,0xf0,0xff,0xff,0xff,0x01,0x00,0x00] -// NOSI: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOCI: :[[@LINE-4]]:21: error: invalid operand for instruction -// NOGFX11: :[[@LINE-5]]:21: error: invalid operand for instruction -// NOGFX12: :[[@LINE-6]]:21: error: invalid operand for instruction +// NOCI: :[[@LINE-2]]:21: error: invalid operand for instruction +// NOGFX11: :[[@LINE-3]]:21: error: invalid operand for instruction +// NOGFX12: :[[@LINE-4]]:21: error: invalid operand for instruction +// NOGFX89: :[[@LINE-5]]:21: error: invalid operand for instruction +// NOSI: :[[@LINE-6]]:1: error: instruction not 
supported on this GPU // NOCIVI: :[[@LINE-4]]:21: error: invalid operand for instruction //---------------------------------------------------------------------------// @@ -1106,210 +1106,210 @@ v_trunc_f64 v[0:1], 0x1fffffff000 //---------------------------------------------------------------------------// buffer_atomic_add v0, off, s[0:3], scc offset:4095 -// SICI: buffer_atomic_add v0, off, s[0:3], src_scc offset:4095 ; encoding: [0xff,0x0f,0xc8,0xe0,0x00,0x00,0x00,0xfd] -// GFX89: buffer_atomic_add v0, off, s[0:3], src_scc offset:4095 ; encoding: [0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0xfd] -// GFX12XX: buffer_atomic_add_u32 v0, off, s[0:3], src_scc offset:4095 ; encoding: [0x7d,0x40,0x0d,0xc4,0x00,0x00,0x80,0x00,0x00,0xff,0x0f,0x00] // GFX11: buffer_atomic_add_u32 v0, off, s[0:3], src_scc offset:4095 ; encoding: [0xff,0x0f,0xd4,0xe0,0x00,0x00,0x00,0xfd] +// GFX12XX: buffer_atomic_add_u32 v0, off, s[0:3], src_scc offset:4095 ; encoding: [0x7d,0x40,0x0d,0xc4,0x00,0x00,0x80,0x00,0x00,0xff,0x0f,0x00] +// GFX89: buffer_atomic_add v0, off, s[0:3], src_scc offset:4095 ; encoding: [0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0xfd] +// SICI: buffer_atomic_add v0, off, s[0:3], src_scc offset:4095 ; encoding: [0xff,0x0f,0xc8,0xe0,0x00,0x00,0x00,0xfd] s_add_i32 s0, vccz, s0 -// SICI: s_add_i32 s0, src_vccz, s0 ; encoding: [0xfb,0x00,0x00,0x81] // GFX89: s_add_i32 s0, src_vccz, s0 ; encoding: [0xfb,0x00,0x00,0x81] -// NOGFX11: :[[@LINE-3]]:15: error: src_vccz register not available on this GPU -// NOGFX12: :[[@LINE-4]]:15: error: src_vccz register not available on this GPU -// NOGFX1250: :[[@LINE-5]]:15: error: src_vccz register not available on this GPU +// NOGFX11: :[[@LINE-2]]:15: error: src_vccz register not available on this GPU +// NOGFX12: :[[@LINE-3]]:15: error: src_vccz register not available on this GPU +// NOGFX1250: :[[@LINE-4]]:15: error: src_vccz register not available on this GPU +// SICI: s_add_i32 s0, src_vccz, s0 ; encoding: [0xfb,0x00,0x00,0x81] s_add_i32 s0, execz, s0 -// SICI: s_add_i32 s0, src_execz, s0 ; encoding: [0xfc,0x00,0x00,0x81] // GFX89: s_add_i32 s0, src_execz, s0 ; encoding: [0xfc,0x00,0x00,0x81] -// NOGFX11: :[[@LINE-3]]:15: error: src_execz register not available on this GPU -// NOGFX12: :[[@LINE-4]]:15: error: src_execz register not available on this GPU -// NOGFX1250: :[[@LINE-5]]:15: error: src_execz register not available on this GPU +// NOGFX11: :[[@LINE-2]]:15: error: src_execz register not available on this GPU +// NOGFX12: :[[@LINE-3]]:15: error: src_execz register not available on this GPU +// NOGFX1250: :[[@LINE-4]]:15: error: src_execz register not available on this GPU +// SICI: s_add_i32 s0, src_execz, s0 ; encoding: [0xfc,0x00,0x00,0x81] s_add_i32 s0, scc, s0 -// SICI: s_add_i32 s0, src_scc, s0 ; encoding: [0xfd,0x00,0x00,0x81] -// GFX89: s_add_i32 s0, src_scc, s0 ; encoding: [0xfd,0x00,0x00,0x81] -// GFX12XX: s_add_co_i32 s0, src_scc, s0 ; encoding: [0xfd,0x00,0x00,0x81] // GFX11: s_add_i32 s0, src_scc, s0 ; encoding: [0xfd,0x00,0x00,0x81] +// GFX12XX: s_add_co_i32 s0, src_scc, s0 ; encoding: [0xfd,0x00,0x00,0x81] +// GFX89: s_add_i32 s0, src_scc, s0 ; encoding: [0xfd,0x00,0x00,0x81] +// SICI: s_add_i32 s0, src_scc, s0 ; encoding: [0xfd,0x00,0x00,0x81] s_and_b64 s[0:1], s[0:1], src_vccz -// SICI: s_and_b64 s[0:1], s[0:1], src_vccz ; encoding: [0x00,0xfb,0x80,0x87] // GFX89: s_and_b64 s[0:1], s[0:1], src_vccz ; encoding: [0x00,0xfb,0x80,0x86] -// NOGFX11: :[[@LINE-3]]:27: error: src_vccz register not available on this GPU -// NOGFX12: :[[@LINE-4]]:27: error: 
src_vccz register not available on this GPU -// NOGFX1250: :[[@LINE-5]]:27: error: src_vccz register not available on this GPU +// NOGFX11: :[[@LINE-2]]:27: error: src_vccz register not available on this GPU +// NOGFX12: :[[@LINE-3]]:27: error: src_vccz register not available on this GPU +// NOGFX1250: :[[@LINE-4]]:27: error: src_vccz register not available on this GPU +// SICI: s_and_b64 s[0:1], s[0:1], src_vccz ; encoding: [0x00,0xfb,0x80,0x87] s_and_b64 s[0:1], s[0:1], src_execz -// SICI: s_and_b64 s[0:1], s[0:1], src_execz ; encoding: [0x00,0xfc,0x80,0x87] // GFX89: s_and_b64 s[0:1], s[0:1], src_execz ; encoding: [0x00,0xfc,0x80,0x86] -// NOGFX11: :[[@LINE-3]]:27: error: src_execz register not available on this GPU -// NOGFX12: :[[@LINE-4]]:27: error: src_execz register not available on this GPU -// NOGFX1250: :[[@LINE-5]]:27: error: src_execz register not available on this GPU +// NOGFX11: :[[@LINE-2]]:27: error: src_execz register not available on this GPU +// NOGFX12: :[[@LINE-3]]:27: error: src_execz register not available on this GPU +// NOGFX1250: :[[@LINE-4]]:27: error: src_execz register not available on this GPU +// SICI: s_and_b64 s[0:1], s[0:1], src_execz ; encoding: [0x00,0xfc,0x80,0x87] s_and_b64 s[0:1], s[0:1], src_scc -// SICI: s_and_b64 s[0:1], s[0:1], src_scc ; encoding: [0x00,0xfd,0x80,0x87] -// GFX89: s_and_b64 s[0:1], s[0:1], src_scc ; encoding: [0x00,0xfd,0x80,0x86] -// GFX12XX: s_and_b64 s[0:1], s[0:1], src_scc ; encoding: [0x00,0xfd,0x80,0x8b] // GFX11: s_and_b64 s[0:1], s[0:1], src_scc ; encoding: [0x00,0xfd,0x80,0x8b] +// GFX12XX: s_and_b64 s[0:1], s[0:1], src_scc ; encoding: [0x00,0xfd,0x80,0x8b] +// GFX89: s_and_b64 s[0:1], s[0:1], src_scc ; encoding: [0x00,0xfd,0x80,0x86] +// SICI: s_and_b64 s[0:1], s[0:1], src_scc ; encoding: [0x00,0xfd,0x80,0x87] v_add_u16 v0, vccz, v0 // GFX89: v_add_u16_e32 v0, src_vccz, v0 ; encoding: [0xfb,0x00,0x00,0x4c] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU -// NOGFX11: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU // NOSICIVI: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_add_u16_sdwa v0, scc, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU // GFX9: v_add_u16_sdwa v0, src_scc, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0xfd,0x06,0x86,0x06] -// NOVI: :[[@LINE-3]]:20: error: invalid operand for instruction -// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-6]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-6]]:20: error: invalid operand for 
instruction // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_add_u16_sdwa v0, v0, scc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU // GFX9: v_add_u16_sdwa v0, v0, src_scc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xfa,0x01,0x4c,0x00,0x06,0x06,0x86] -// NOVI: :[[@LINE-3]]:24: error: invalid operand for instruction -// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-6]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-6]]:24: error: invalid operand for instruction // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_add_u32 v0, execz, v0 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU // GFX9: v_add_u32_e32 v0, src_execz, v0 ; encoding: [0xfc,0x00,0x00,0x68] -// NOVI: :[[@LINE-3]]:1: error: operands are not valid for this GPU or mode -// NOGFX11: :[[@LINE-4]]:15: error: src_execz register not available on this GPU -// NOGFX12: :[[@LINE-5]]:15: error: src_execz register not available on this GPU -// NOGFX1250: :[[@LINE-6]]:15: error: src_execz register not available on this GPU +// NOGFX11: :[[@LINE-2]]:15: error: src_execz register not available on this GPU +// NOGFX12: :[[@LINE-3]]:15: error: src_execz register not available on this GPU +// NOGFX1250: :[[@LINE-4]]:15: error: src_execz register not available on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-6]]:1: error: operands are not valid for this GPU or mode // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_add_u32_e64 v0, scc, v0 +// GFX11: v_add_nc_u32_e64 v0, src_scc, v0 ; encoding: [0x00,0x00,0x25,0xd5,0xfd,0x00,0x02,0x00] // GFX12XX: v_add_nc_u32_e64 v0, src_scc, v0 ; encoding: [0x00,0x00,0x25,0xd5,0xfd,0x00,0x02,0x00] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX9: v_add_u32_e64 v0, src_scc, v0 ; encoding: [0x00,0x00,0x34,0xd1,0xfd,0x00,0x02,0x00] -// GFX11: v_add_nc_u32_e64 v0, src_scc, v0 ; encoding: [0x00,0x00,0x25,0xd5,0xfd,0x00,0x02,0x00] +// NOSICI: :[[@LINE-4]]:1: error: instruction not supported on this GPU // NOVI: :[[@LINE-5]]:1: error: operands are not valid for this GPU or mode // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_cmp_eq_i64 vcc, scc, v[0:1] -// SICI: v_cmp_eq_i64_e32 vcc, src_scc, v[0:1] ; encoding: [0xfd,0x00,0x44,0x7d] // GFX89: v_cmp_eq_i64_e32 vcc, src_scc, v[0:1] ; encoding: [0xfd,0x00,0xc4,0x7d] -// NOGFX11: :[[@LINE-3]]:1: error: operands are not valid for this GPU or mode -// NOGFX12: :[[@LINE-4]]:1: error: operands are not valid for this GPU or mode -// NOGFX1250: :[[@LINE-5]]:1: error: operands are not valid for this GPU or mode +// NOGFX11: :[[@LINE-2]]:1: error: operands are not valid for this GPU or mode +// NOGFX12: :[[@LINE-3]]:1: error: operands are not valid for this GPU or mode +// NOGFX1250: :[[@LINE-4]]:1: error: operands are not valid for this GPU or mode +// SICI: v_cmp_eq_i64_e32 vcc, src_scc, v[0:1] ; 
encoding: [0xfd,0x00,0x44,0x7d] v_max_f16 v0, execz, v0 // GFX89: v_max_f16_e32 v0, src_execz, v0 ; encoding: [0xfc,0x00,0x00,0x5a] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU -// NOGFX11: :[[@LINE-3]]:15: error: src_execz register not available on this GPU -// NOGFX12: :[[@LINE-4]]:15: error: src_execz register not available on this GPU -// NOGFX1250: :[[@LINE-5]]:15: error: src_execz register not available on this GPU +// NOGFX11: :[[@LINE-2]]:15: error: src_execz register not available on this GPU +// NOGFX12: :[[@LINE-3]]:15: error: src_execz register not available on this GPU +// NOGFX1250: :[[@LINE-4]]:15: error: src_execz register not available on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU // NOSICIVI: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_max_f32 v0, vccz, v0 -// SICI: v_max_f32_e32 v0, src_vccz, v0 ; encoding: [0xfb,0x00,0x00,0x20] // GFX89: v_max_f32_e32 v0, src_vccz, v0 ; encoding: [0xfb,0x00,0x00,0x16] -// NOGFX11: :[[@LINE-3]]:15: error: src_vccz register not available on this GPU -// NOGFX12: :[[@LINE-4]]:15: error: src_vccz register not available on this GPU -// NOGFX1250: :[[@LINE-5]]:15: error: src_vccz register not available on this GPU +// NOGFX11: :[[@LINE-2]]:15: error: src_vccz register not available on this GPU +// NOGFX12: :[[@LINE-3]]:15: error: src_vccz register not available on this GPU +// NOGFX1250: :[[@LINE-4]]:15: error: src_vccz register not available on this GPU +// SICI: v_max_f32_e32 v0, src_vccz, v0 ; encoding: [0xfb,0x00,0x00,0x20] v_max_f64 v[0:1], scc, v[0:1] -// SICI: v_max_f64 v[0:1], src_scc, v[0:1] ; encoding: [0x00,0x00,0xce,0xd2,0xfd,0x00,0x02,0x00] -// GFX89: v_max_f64 v[0:1], src_scc, v[0:1] ; encoding: [0x00,0x00,0x83,0xd2,0xfd,0x00,0x02,0x00] -// GFX12XX: v_max_num_f64_e32 v[0:1], src_scc, v[0:1] ; encoding: [0xfd,0x00,0x00,0x1c] // GFX11: v_max_f64 v[0:1], src_scc, v[0:1] ; encoding: [0x00,0x00,0x2a,0xd7,0xfd,0x00,0x02,0x00] +// GFX12XX: v_max_num_f64_e32 v[0:1], src_scc, v[0:1] ; encoding: [0xfd,0x00,0x00,0x1c] +// GFX89: v_max_f64 v[0:1], src_scc, v[0:1] ; encoding: [0x00,0x00,0x83,0xd2,0xfd,0x00,0x02,0x00] +// SICI: v_max_f64 v[0:1], src_scc, v[0:1] ; encoding: [0x00,0x00,0xce,0xd2,0xfd,0x00,0x02,0x00] v_pk_add_f16 v0, execz, v0 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU // GFX9: v_pk_add_f16 v0, src_execz, v0 ; encoding: [0x00,0x40,0x8f,0xd3,0xfc,0x00,0x02,0x18] -// NOVI: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOGFX11: :[[@LINE-4]]:18: error: src_execz register not available on this GPU -// NOGFX12: :[[@LINE-5]]:18: error: src_execz register not available on this GPU -// NOGFX1250: :[[@LINE-6]]:18: error: src_execz register not available on this GPU +// NOGFX11: :[[@LINE-2]]:18: error: src_execz register not available on this GPU +// NOGFX12: :[[@LINE-3]]:18: error: src_execz register not available on this GPU +// NOGFX1250: :[[@LINE-4]]:18: error: src_execz register not available on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-6]]:1: error: instruction not supported on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_ceil_f16 v0, neg(vccz) // GFX89: v_ceil_f16_e64 v0, -src_vccz ; encoding: [0x00,0x00,0x85,0xd1,0xfb,0x00,0x00,0x20] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU -// NOGFX11: :[[@LINE-3]]:20: error: src_vccz register not available on this GPU -// NOGFX12: 
:[[@LINE-4]]:20: error: src_vccz register not available on this GPU -// NOGFX1250: :[[@LINE-5]]:20: error: src_vccz register not available on this GPU +// NOGFX11: :[[@LINE-2]]:20: error: src_vccz register not available on this GPU +// NOGFX12: :[[@LINE-3]]:20: error: src_vccz register not available on this GPU +// NOGFX1250: :[[@LINE-4]]:20: error: src_vccz register not available on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU // NOSICIVI: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_ceil_f16 v0, abs(scc) -// GFX89: v_ceil_f16_e64 v0, |src_scc| ; encoding: [0x00,0x01,0x85,0xd1,0xfd,0x00,0x00,0x00] -// GFX12XX: v_ceil_f16_e64 v0, |src_scc| ; encoding: [0x00,0x01,0xdc,0xd5,0xfd,0x00,0x00,0x00] -// NOSICI: :[[@LINE-3]]:1: error: instruction not supported on this GPU // GFX11: v_ceil_f16_e64 v0, |src_scc| ; encoding: [0x00,0x01,0xdc,0xd5,0xfd,0x00,0x00,0x00] +// GFX12XX: v_ceil_f16_e64 v0, |src_scc| ; encoding: [0x00,0x01,0xdc,0xd5,0xfd,0x00,0x00,0x00] +// GFX89: v_ceil_f16_e64 v0, |src_scc| ; encoding: [0x00,0x01,0x85,0xd1,0xfd,0x00,0x00,0x00] +// NOSICI: :[[@LINE-4]]:1: error: instruction not supported on this GPU // NOSICIVI: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_ceil_f64 v[5:6], |execz| -// GFX89: v_ceil_f64_e64 v[5:6], |src_execz| ; encoding: [0x05,0x01,0x58,0xd1,0xfc,0x00,0x00,0x00] // CI: v_ceil_f64_e64 v[5:6], |src_execz| ; encoding: [0x05,0x01,0x30,0xd3,0xfc,0x00,0x00,0x00] -// NOSI: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOGFX11: :[[@LINE-4]]:21: error: src_execz register not available on this GPU -// NOGFX12: :[[@LINE-5]]:21: error: src_execz register not available on this GPU -// NOGFX1250: :[[@LINE-6]]:21: error: src_execz register not available on this GPU +// GFX89: v_ceil_f64_e64 v[5:6], |src_execz| ; encoding: [0x05,0x01,0x58,0xd1,0xfc,0x00,0x00,0x00] +// NOGFX11: :[[@LINE-3]]:21: error: src_execz register not available on this GPU +// NOGFX12: :[[@LINE-4]]:21: error: src_execz register not available on this GPU +// NOGFX1250: :[[@LINE-5]]:21: error: src_execz register not available on this GPU +// NOSI: :[[@LINE-6]]:1: error: instruction not supported on this GPU v_ceil_f64 v[5:6], -vcc -// GFX89: v_ceil_f64_e64 v[5:6], -vcc ; encoding: [0x05,0x00,0x58,0xd1,0x6a,0x00,0x00,0x20] // CI: v_ceil_f64_e64 v[5:6], -vcc ; encoding: [0x05,0x00,0x30,0xd3,0x6a,0x00,0x00,0x20] // GFX11: v_ceil_f64_e64 v[5:6], -vcc ; encoding: [0x05,0x00,0x98,0xd5,0x6a,0x00,0x00,0x20] // GFX12: v_ceil_f64_e64 v[5:6], -vcc ; encoding: [0x05,0x00,0x98,0xd5,0x6a,0x00,0x00,0x20] -// NOSI: :[[@LINE-5]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-6]]:12: error: invalid operand for instruction +// GFX89: v_ceil_f64_e64 v[5:6], -vcc ; encoding: [0x05,0x00,0x58,0xd1,0x6a,0x00,0x00,0x20] +// NOGFX1250: :[[@LINE-5]]:12: error: invalid operand for instruction +// NOSI: :[[@LINE-6]]:1: error: instruction not supported on this GPU v_ceil_f32 v0, -vccz -// SICI: v_ceil_f32_e64 v0, -src_vccz ; encoding: [0x00,0x00,0x44,0xd3,0xfb,0x00,0x00,0x20] // GFX89: v_ceil_f32_e64 v0, -src_vccz ; encoding: [0x00,0x00,0x5d,0xd1,0xfb,0x00,0x00,0x20] -// NOGFX11: :[[@LINE-3]]:17: error: src_vccz register not available on this GPU -// NOGFX12: :[[@LINE-4]]:17: error: src_vccz register not available on this GPU -// NOGFX1250: :[[@LINE-5]]:17: error: src_vccz register not available on this GPU +// NOGFX11: :[[@LINE-2]]:17: error: src_vccz register not available on this GPU +// NOGFX12: :[[@LINE-3]]:17: 
error: src_vccz register not available on this GPU +// NOGFX1250: :[[@LINE-4]]:17: error: src_vccz register not available on this GPU +// SICI: v_ceil_f32_e64 v0, -src_vccz ; encoding: [0x00,0x00,0x44,0xd3,0xfb,0x00,0x00,0x20] v_ceil_f32 v0, |execz| -// SICI: v_ceil_f32_e64 v0, |src_execz| ; encoding: [0x00,0x01,0x44,0xd3,0xfc,0x00,0x00,0x00] // GFX89: v_ceil_f32_e64 v0, |src_execz| ; encoding: [0x00,0x01,0x5d,0xd1,0xfc,0x00,0x00,0x00] -// NOGFX11: :[[@LINE-3]]:17: error: src_execz register not available on this GPU -// NOGFX12: :[[@LINE-4]]:17: error: src_execz register not available on this GPU -// NOGFX1250: :[[@LINE-5]]:17: error: src_execz register not available on this GPU +// NOGFX11: :[[@LINE-2]]:17: error: src_execz register not available on this GPU +// NOGFX12: :[[@LINE-3]]:17: error: src_execz register not available on this GPU +// NOGFX1250: :[[@LINE-4]]:17: error: src_execz register not available on this GPU +// SICI: v_ceil_f32_e64 v0, |src_execz| ; encoding: [0x00,0x01,0x44,0xd3,0xfc,0x00,0x00,0x00] v_ceil_f16_sdwa v5, |vccz| dst_sel:DWORD dst_unused:UNUSED_PRESERVE -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU // GFX9: v_ceil_f16_sdwa v5, |src_vccz| dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x8a,0x0a,0x7e,0xfb,0x16,0xa6,0x00] -// NOVI: :[[@LINE-3]]:22: error: invalid operand for instruction -// NOGFX11: :[[@LINE-4]]:1: error: sdwa variant of this instruction is not supported -// NOGFX12: :[[@LINE-5]]:1: error: sdwa variant of this instruction is not supported -// NOGFX1250: :[[@LINE-6]]:1: error: sdwa variant of this instruction is not supported +// NOGFX11: :[[@LINE-2]]:1: error: sdwa variant of this instruction is not supported +// NOGFX12: :[[@LINE-3]]:1: error: sdwa variant of this instruction is not supported +// NOGFX1250: :[[@LINE-4]]:1: error: sdwa variant of this instruction is not supported +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-6]]:22: error: invalid operand for instruction // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_ceil_f16_sdwa v5, -scc dst_sel:DWORD dst_unused:UNUSED_PRESERVE -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU // GFX9: v_ceil_f16_sdwa v5, -src_scc dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x8a,0x0a,0x7e,0xfd,0x16,0x96,0x00] -// NOVI: :[[@LINE-3]]:22: error: invalid operand for instruction -// NOGFX11: :[[@LINE-4]]:1: error: sdwa variant of this instruction is not supported -// NOGFX12: :[[@LINE-5]]:1: error: sdwa variant of this instruction is not supported -// NOGFX1250: :[[@LINE-6]]:1: error: sdwa variant of this instruction is not supported +// NOGFX11: :[[@LINE-2]]:1: error: sdwa variant of this instruction is not supported +// NOGFX12: :[[@LINE-3]]:1: error: sdwa variant of this instruction is not supported +// NOGFX1250: :[[@LINE-4]]:1: error: sdwa variant of this instruction is not supported +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-6]]:22: error: invalid operand for instruction // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_ceil_f32_sdwa v5, vccz dst_sel:DWORD src0_sel:DWORD -// NOSICI: :[[@LINE-1]]:1: error: sdwa variant of this instruction is not supported // GFX9: v_ceil_f32_sdwa v5, src_vccz dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x3a,0x0a,0x7e,0xfb,0x16,0x86,0x00] -// NOVI: :[[@LINE-3]]:21: error: invalid operand for 
instruction -// NOGFX11: :[[@LINE-4]]:1: error: sdwa variant of this instruction is not supported -// NOGFX12: :[[@LINE-5]]:1: error: sdwa variant of this instruction is not supported -// NOGFX1250: :[[@LINE-6]]:1: error: sdwa variant of this instruction is not supported +// NOGFX11: :[[@LINE-2]]:1: error: sdwa variant of this instruction is not supported +// NOGFX12: :[[@LINE-3]]:1: error: sdwa variant of this instruction is not supported +// NOGFX1250: :[[@LINE-4]]:1: error: sdwa variant of this instruction is not supported +// NOSICI: :[[@LINE-5]]:1: error: sdwa variant of this instruction is not supported +// NOVI: :[[@LINE-6]]:21: error: invalid operand for instruction // NOSICIVI: :[[@LINE-1]]:1: error: sdwa variant of this instruction is not supported v_ceil_f32_sdwa v5, |execz| dst_sel:DWORD src0_sel:DWORD -// NOSICI: :[[@LINE-1]]:1: error: sdwa variant of this instruction is not supported // GFX9: v_ceil_f32_sdwa v5, |src_execz| dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x3a,0x0a,0x7e,0xfc,0x16,0xa6,0x00] -// NOVI: :[[@LINE-3]]:22: error: invalid operand for instruction -// NOGFX11: :[[@LINE-4]]:1: error: sdwa variant of this instruction is not supported -// NOGFX12: :[[@LINE-5]]:1: error: sdwa variant of this instruction is not supported -// NOGFX1250: :[[@LINE-6]]:1: error: sdwa variant of this instruction is not supported +// NOGFX11: :[[@LINE-2]]:1: error: sdwa variant of this instruction is not supported +// NOGFX12: :[[@LINE-3]]:1: error: sdwa variant of this instruction is not supported +// NOGFX1250: :[[@LINE-4]]:1: error: sdwa variant of this instruction is not supported +// NOSICI: :[[@LINE-5]]:1: error: sdwa variant of this instruction is not supported +// NOVI: :[[@LINE-6]]:22: error: invalid operand for instruction // NOSICIVI: :[[@LINE-1]]:1: error: sdwa variant of this instruction is not supported //---------------------------------------------------------------------------// @@ -1317,266 +1317,266 @@ v_ceil_f32_sdwa v5, |execz| dst_sel:DWORD src0_sel:DWORD //---------------------------------------------------------------------------// buffer_atomic_add v0, off, s[0:3], src_shared_base offset:4095 -// NOSICI: :[[@LINE-1]]:36: error: src_shared_base register not available on this GPU -// GFX9: buffer_atomic_add v0, off, s[0:3], src_shared_base offset:4095 ; encoding: [0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0xeb] // GFX11: buffer_atomic_add_u32 v0, off, s[0:3], src_shared_base offset:4095 ; encoding: [0xff,0x0f,0xd4,0xe0,0x00,0x00,0x00,0xeb] -// NOVI: :[[@LINE-4]]:36: error: src_shared_base register not available on this GPU -// NOGFX12: :[[@LINE-5]]:1: error: operands are not valid for this GPU or mode -// NOGFX1250: :[[@LINE-6]]:1: error: operands are not valid for this GPU or mode +// GFX9: buffer_atomic_add v0, off, s[0:3], src_shared_base offset:4095 ; encoding: [0xff,0x0f,0x08,0xe1,0x00,0x00,0x00,0xeb] +// NOGFX12: :[[@LINE-3]]:1: error: operands are not valid for this GPU or mode +// NOGFX1250: :[[@LINE-4]]:1: error: operands are not valid for this GPU or mode +// NOSICI: :[[@LINE-5]]:36: error: src_shared_base register not available on this GPU +// NOVI: :[[@LINE-6]]:36: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:36: error: src_shared_base register not available on this GPU s_add_i32 s0, src_shared_base, s0 +// GFX11: s_add_i32 s0, src_shared_base, s0 ; encoding: [0xeb,0x00,0x00,0x81] // GFX12XX: s_add_co_i32 s0, src_shared_base, s0 ; encoding: [0xeb,0x00,0x00,0x81] -// NOSICI: 
:[[@LINE-2]]:15: error: src_shared_base register not available on this GPU // GFX9: s_add_i32 s0, src_shared_base, s0 ; encoding: [0xeb,0x00,0x00,0x81] -// GFX11: s_add_i32 s0, src_shared_base, s0 ; encoding: [0xeb,0x00,0x00,0x81] +// NOSICI: :[[@LINE-4]]:15: error: src_shared_base register not available on this GPU // NOVI: :[[@LINE-5]]:15: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:15: error: src_shared_base register not available on this GPU s_add_i32 s0, src_shared_limit, s0 +// GFX11: s_add_i32 s0, src_shared_limit, s0 ; encoding: [0xec,0x00,0x00,0x81] // GFX12XX: s_add_co_i32 s0, src_shared_limit, s0 ; encoding: [0xec,0x00,0x00,0x81] -// NOSICI: :[[@LINE-2]]:15: error: src_shared_limit register not available on this GPU // GFX9: s_add_i32 s0, src_shared_limit, s0 ; encoding: [0xec,0x00,0x00,0x81] -// GFX11: s_add_i32 s0, src_shared_limit, s0 ; encoding: [0xec,0x00,0x00,0x81] +// NOSICI: :[[@LINE-4]]:15: error: src_shared_limit register not available on this GPU // NOVI: :[[@LINE-5]]:15: error: src_shared_limit register not available on this GPU // NOSICIVI: :[[@LINE-1]]:15: error: src_shared_limit register not available on this GPU s_add_i32 s0, src_private_base, s0 +// GFX11: s_add_i32 s0, src_private_base, s0 ; encoding: [0xed,0x00,0x00,0x81] // GFX12XX: s_add_co_i32 s0, src_private_base, s0 ; encoding: [0xed,0x00,0x00,0x81] -// NOSICI: :[[@LINE-2]]:15: error: src_private_base register not available on this GPU // GFX9: s_add_i32 s0, src_private_base, s0 ; encoding: [0xed,0x00,0x00,0x81] -// GFX11: s_add_i32 s0, src_private_base, s0 ; encoding: [0xed,0x00,0x00,0x81] +// NOSICI: :[[@LINE-4]]:15: error: src_private_base register not available on this GPU // NOVI: :[[@LINE-5]]:15: error: src_private_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:15: error: src_private_base register not available on this GPU s_add_i32 s0, src_private_limit, s0 +// GFX11: s_add_i32 s0, src_private_limit, s0 ; encoding: [0xee,0x00,0x00,0x81] // GFX12XX: s_add_co_i32 s0, src_private_limit, s0 ; encoding: [0xee,0x00,0x00,0x81] -// NOSICI: :[[@LINE-2]]:15: error: src_private_limit register not available on this GPU // GFX9: s_add_i32 s0, src_private_limit, s0 ; encoding: [0xee,0x00,0x00,0x81] -// GFX11: s_add_i32 s0, src_private_limit, s0 ; encoding: [0xee,0x00,0x00,0x81] +// NOSICI: :[[@LINE-4]]:15: error: src_private_limit register not available on this GPU // NOVI: :[[@LINE-5]]:15: error: src_private_limit register not available on this GPU // NOSICIVI: :[[@LINE-1]]:15: error: src_private_limit register not available on this GPU s_add_i32 s0, src_pops_exiting_wave_id, s0 -// NOSICI: :[[@LINE-1]]:15: error: src_pops_exiting_wave_id register not available on this GPU // GFX9: s_add_i32 s0, src_pops_exiting_wave_id, s0 ; encoding: [0xef,0x00,0x00,0x81] -// NOVI: :[[@LINE-3]]:15: error: src_pops_exiting_wave_id register not available on this GPU -// NOGFX11: :[[@LINE-4]]:15: error: src_pops_exiting_wave_id register not available on this GPU -// NOGFX12: :[[@LINE-5]]:15: error: src_pops_exiting_wave_id register not available on this GPU -// NOGFX1250: :[[@LINE-6]]:15: error: src_pops_exiting_wave_id register not available on this GPU +// NOGFX11: :[[@LINE-2]]:15: error: src_pops_exiting_wave_id register not available on this GPU +// NOGFX12: :[[@LINE-3]]:15: error: src_pops_exiting_wave_id register not available on this GPU +// NOGFX1250: :[[@LINE-4]]:15: error: src_pops_exiting_wave_id register not available on this GPU +// NOSICI: :[[@LINE-5]]:15: 
error: src_pops_exiting_wave_id register not available on this GPU +// NOVI: :[[@LINE-6]]:15: error: src_pops_exiting_wave_id register not available on this GPU // NOSICIVI: :[[@LINE-1]]:15: error: src_pops_exiting_wave_id register not available on this GPU s_and_b64 s[0:1], s[0:1], src_shared_base +// GFX11: s_and_b64 s[0:1], s[0:1], src_shared_base ; encoding: [0x00,0xeb,0x80,0x8b] // GFX12XX: s_and_b64 s[0:1], s[0:1], src_shared_base ; encoding: [0x00,0xeb,0x80,0x8b] -// NOSICI: :[[@LINE-2]]:27: error: src_shared_base register not available on this GPU // GFX9: s_and_b64 s[0:1], s[0:1], src_shared_base ; encoding: [0x00,0xeb,0x80,0x86] -// GFX11: s_and_b64 s[0:1], s[0:1], src_shared_base ; encoding: [0x00,0xeb,0x80,0x8b] +// NOSICI: :[[@LINE-4]]:27: error: src_shared_base register not available on this GPU // NOVI: :[[@LINE-5]]:27: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:27: error: src_shared_base register not available on this GPU s_and_b64 s[0:1], s[0:1], src_shared_limit +// GFX11: s_and_b64 s[0:1], s[0:1], src_shared_limit ; encoding: [0x00,0xec,0x80,0x8b] // GFX12XX: s_and_b64 s[0:1], s[0:1], src_shared_limit ; encoding: [0x00,0xec,0x80,0x8b] -// NOSICI: :[[@LINE-2]]:27: error: src_shared_limit register not available on this GPU // GFX9: s_and_b64 s[0:1], s[0:1], src_shared_limit ; encoding: [0x00,0xec,0x80,0x86] -// GFX11: s_and_b64 s[0:1], s[0:1], src_shared_limit ; encoding: [0x00,0xec,0x80,0x8b] +// NOSICI: :[[@LINE-4]]:27: error: src_shared_limit register not available on this GPU // NOVI: :[[@LINE-5]]:27: error: src_shared_limit register not available on this GPU // NOSICIVI: :[[@LINE-1]]:27: error: src_shared_limit register not available on this GPU s_and_b64 s[0:1], s[0:1], src_private_base +// GFX11: s_and_b64 s[0:1], s[0:1], src_private_base ; encoding: [0x00,0xed,0x80,0x8b] // GFX12XX: s_and_b64 s[0:1], s[0:1], src_private_base ; encoding: [0x00,0xed,0x80,0x8b] -// NOSICI: :[[@LINE-2]]:27: error: src_private_base register not available on this GPU // GFX9: s_and_b64 s[0:1], s[0:1], src_private_base ; encoding: [0x00,0xed,0x80,0x86] -// GFX11: s_and_b64 s[0:1], s[0:1], src_private_base ; encoding: [0x00,0xed,0x80,0x8b] +// NOSICI: :[[@LINE-4]]:27: error: src_private_base register not available on this GPU // NOVI: :[[@LINE-5]]:27: error: src_private_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:27: error: src_private_base register not available on this GPU s_and_b64 s[0:1], s[0:1], src_private_limit +// GFX11: s_and_b64 s[0:1], s[0:1], src_private_limit ; encoding: [0x00,0xee,0x80,0x8b] // GFX12XX: s_and_b64 s[0:1], s[0:1], src_private_limit ; encoding: [0x00,0xee,0x80,0x8b] -// NOSICI: :[[@LINE-2]]:27: error: src_private_limit register not available on this GPU // GFX9: s_and_b64 s[0:1], s[0:1], src_private_limit ; encoding: [0x00,0xee,0x80,0x86] -// GFX11: s_and_b64 s[0:1], s[0:1], src_private_limit ; encoding: [0x00,0xee,0x80,0x8b] +// NOSICI: :[[@LINE-4]]:27: error: src_private_limit register not available on this GPU // NOVI: :[[@LINE-5]]:27: error: src_private_limit register not available on this GPU // NOSICIVI: :[[@LINE-1]]:27: error: src_private_limit register not available on this GPU s_and_b64 s[0:1], s[0:1], src_pops_exiting_wave_id -// NOSICI: :[[@LINE-1]]:27: error: src_pops_exiting_wave_id register not available on this GPU // GFX9: s_and_b64 s[0:1], s[0:1], src_pops_exiting_wave_id ; encoding: [0x00,0xef,0x80,0x86] -// NOVI: :[[@LINE-3]]:27: error: src_pops_exiting_wave_id register not 
available on this GPU -// NOGFX11: :[[@LINE-4]]:27: error: src_pops_exiting_wave_id register not available on this GPU -// NOGFX12: :[[@LINE-5]]:27: error: src_pops_exiting_wave_id register not available on this GPU -// NOGFX1250: :[[@LINE-6]]:27: error: src_pops_exiting_wave_id register not available on this GPU +// NOGFX11: :[[@LINE-2]]:27: error: src_pops_exiting_wave_id register not available on this GPU +// NOGFX12: :[[@LINE-3]]:27: error: src_pops_exiting_wave_id register not available on this GPU +// NOGFX1250: :[[@LINE-4]]:27: error: src_pops_exiting_wave_id register not available on this GPU +// NOSICI: :[[@LINE-5]]:27: error: src_pops_exiting_wave_id register not available on this GPU +// NOVI: :[[@LINE-6]]:27: error: src_pops_exiting_wave_id register not available on this GPU // NOSICIVI: :[[@LINE-1]]:27: error: src_pops_exiting_wave_id register not available on this GPU v_add_u16 v0, src_shared_base, v0 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU // GFX9: v_add_u16_e32 v0, src_shared_base, v0 ; encoding: [0xeb,0x00,0x00,0x4c] -// NOVI: :[[@LINE-3]]:15: error: src_shared_base register not available on this GPU -// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-6]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-6]]:15: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_add_u16_sdwa v0, src_shared_base, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU // GFX9: v_add_u16_sdwa v0, src_shared_base, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x00,0x00,0x4c,0xeb,0x06,0x86,0x06] -// NOVI: :[[@LINE-3]]:20: error: src_shared_base register not available on this GPU -// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-6]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-6]]:20: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_add_u16_sdwa v0, v0, src_shared_base dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU // GFX9: v_add_u16_sdwa v0, v0, src_shared_base dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0xd6,0x01,0x4c,0x00,0x06,0x06,0x86] -// NOVI: :[[@LINE-3]]:24: error: src_shared_base register not available on this GPU -// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU -// NOGFX1250: 
:[[@LINE-6]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-6]]:24: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_add_u32 v0, src_shared_base, v0 +// GFX11: v_add_nc_u32_e32 v0, src_shared_base, v0 ; encoding: [0xeb,0x00,0x00,0x4a] // GFX12XX: v_add_nc_u32_e32 v0, src_shared_base, v0 ; encoding: [0xeb,0x00,0x00,0x4a] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX9: v_add_u32_e32 v0, src_shared_base, v0 ; encoding: [0xeb,0x00,0x00,0x68] -// GFX11: v_add_nc_u32_e32 v0, src_shared_base, v0 ; encoding: [0xeb,0x00,0x00,0x4a] +// NOSICI: :[[@LINE-4]]:1: error: instruction not supported on this GPU // NOVI: :[[@LINE-5]]:15: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_add_u32_e64 v0, src_shared_base, v0 +// GFX11: v_add_nc_u32_e64 v0, src_shared_base, v0 ; encoding: [0x00,0x00,0x25,0xd5,0xeb,0x00,0x02,0x00] // GFX12XX: v_add_nc_u32_e64 v0, src_shared_base, v0 ; encoding: [0x00,0x00,0x25,0xd5,0xeb,0x00,0x02,0x00] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX9: v_add_u32_e64 v0, src_shared_base, v0 ; encoding: [0x00,0x00,0x34,0xd1,0xeb,0x00,0x02,0x00] -// GFX11: v_add_nc_u32_e64 v0, src_shared_base, v0 ; encoding: [0x00,0x00,0x25,0xd5,0xeb,0x00,0x02,0x00] +// NOSICI: :[[@LINE-4]]:1: error: instruction not supported on this GPU // NOVI: :[[@LINE-5]]:19: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_cmp_eq_i64 vcc, src_shared_base, v[0:1] -// NOSICI: :[[@LINE-1]]:19: error: src_shared_base register not available on this GPU // GFX9: v_cmp_eq_i64_e32 vcc, src_shared_base, v[0:1] ; encoding: [0xeb,0x00,0xc4,0x7d] -// NOVI: :[[@LINE-3]]:19: error: src_shared_base register not available on this GPU -// NOGFX11: :[[@LINE-4]]:1: error: operands are not valid for this GPU or mode -// NOGFX12: :[[@LINE-5]]:1: error: operands are not valid for this GPU or mode -// NOGFX1250: :[[@LINE-6]]:1: error: operands are not valid for this GPU or mode +// NOGFX11: :[[@LINE-2]]:1: error: operands are not valid for this GPU or mode +// NOGFX12: :[[@LINE-3]]:1: error: operands are not valid for this GPU or mode +// NOGFX1250: :[[@LINE-4]]:1: error: operands are not valid for this GPU or mode +// NOSICI: :[[@LINE-5]]:19: error: src_shared_base register not available on this GPU +// NOVI: :[[@LINE-6]]:19: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:19: error: src_shared_base register not available on this GPU v_max_f16 v0, src_shared_base, v0 +// GFX11: v_max_f16_e32 v0, src_shared_base, v0 ; encoding: [0xeb,0x00,0x00,0x72] // GFX12XX: v_max_num_f16_e32 v0, src_shared_base, v0 ; encoding: [0xeb,0x00,0x00,0x62] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX9: v_max_f16_e32 v0, src_shared_base, v0 ; encoding: [0xeb,0x00,0x00,0x5a] -// GFX11: v_max_f16_e32 v0, src_shared_base, v0 ; encoding: [0xeb,0x00,0x00,0x72] +// NOSICI: :[[@LINE-4]]:1: error: instruction not supported on this GPU // NOVI: :[[@LINE-5]]:15: error: 
src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_max_f32 v0, src_shared_base, v0 +// GFX11: v_max_f32_e32 v0, src_shared_base, v0 ; encoding: [0xeb,0x00,0x00,0x20] // GFX12XX: v_max_num_f32_e32 v0, src_shared_base, v0 ; encoding: [0xeb,0x00,0x00,0x2c] -// NOSICI: :[[@LINE-2]]:15: error: src_shared_base register not available on this GPU // GFX9: v_max_f32_e32 v0, src_shared_base, v0 ; encoding: [0xeb,0x00,0x00,0x16] -// GFX11: v_max_f32_e32 v0, src_shared_base, v0 ; encoding: [0xeb,0x00,0x00,0x20] +// NOSICI: :[[@LINE-4]]:15: error: src_shared_base register not available on this GPU // NOVI: :[[@LINE-5]]:15: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:15: error: src_shared_base register not available on this GPU v_max_f64 v[0:1], src_shared_base, v[0:1] +// GFX11: v_max_f64 v[0:1], src_shared_base, v[0:1] ; encoding: [0x00,0x00,0x2a,0xd7,0xeb,0x00,0x02,0x00] // GFX12XX: v_max_num_f64_e32 v[0:1], src_shared_base, v[0:1] ; encoding: [0xeb,0x00,0x00,0x1c] -// NOSICI: :[[@LINE-2]]:19: error: src_shared_base register not available on this GPU // GFX9: v_max_f64 v[0:1], src_shared_base, v[0:1] ; encoding: [0x00,0x00,0x83,0xd2,0xeb,0x00,0x02,0x00] -// GFX11: v_max_f64 v[0:1], src_shared_base, v[0:1] ; encoding: [0x00,0x00,0x2a,0xd7,0xeb,0x00,0x02,0x00] +// NOSICI: :[[@LINE-4]]:19: error: src_shared_base register not available on this GPU // NOVI: :[[@LINE-5]]:19: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:19: error: src_shared_base register not available on this GPU v_pk_add_f16 v0, src_shared_base, v0 +// GFX11: v_pk_add_f16 v0, src_shared_base, v0 ; encoding: [0x00,0x40,0x0f,0xcc,0xeb,0x00,0x02,0x18] // GFX12XX: v_pk_add_f16 v0, src_shared_base, v0 ; encoding: [0x00,0x40,0x0f,0xcc,0xeb,0x00,0x02,0x18] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX9: v_pk_add_f16 v0, src_shared_base, v0 ; encoding: [0x00,0x40,0x8f,0xd3,0xeb,0x00,0x02,0x18] -// GFX11: v_pk_add_f16 v0, src_shared_base, v0 ; encoding: [0x00,0x40,0x0f,0xcc,0xeb,0x00,0x02,0x18] +// NOSICI: :[[@LINE-4]]:1: error: instruction not supported on this GPU // NOVI: :[[@LINE-5]]:1: error: instruction not supported on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_ceil_f16 v0, neg(src_shared_base) +// GFX11: v_ceil_f16_e64 v0, -src_shared_base ; encoding: [0x00,0x00,0xdc,0xd5,0xeb,0x00,0x00,0x20] // GFX12XX: v_ceil_f16_e64 v0, -src_shared_base ; encoding: [0x00,0x00,0xdc,0xd5,0xeb,0x00,0x00,0x20] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX9: v_ceil_f16_e64 v0, -src_shared_base ; encoding: [0x00,0x00,0x85,0xd1,0xeb,0x00,0x00,0x20] -// GFX11: v_ceil_f16_e64 v0, -src_shared_base ; encoding: [0x00,0x00,0xdc,0xd5,0xeb,0x00,0x00,0x20] +// NOSICI: :[[@LINE-4]]:1: error: instruction not supported on this GPU // NOVI: :[[@LINE-5]]:20: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_ceil_f16 v0, abs(src_shared_base) +// GFX11: v_ceil_f16_e64 v0, |src_shared_base| ; encoding: [0x00,0x01,0xdc,0xd5,0xeb,0x00,0x00,0x00] // GFX12XX: v_ceil_f16_e64 v0, |src_shared_base| ; encoding: [0x00,0x01,0xdc,0xd5,0xeb,0x00,0x00,0x00] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX9: v_ceil_f16_e64 v0, |src_shared_base| ; encoding: [0x00,0x01,0x85,0xd1,0xeb,0x00,0x00,0x00] -// GFX11: 
v_ceil_f16_e64 v0, |src_shared_base| ; encoding: [0x00,0x01,0xdc,0xd5,0xeb,0x00,0x00,0x00] +// NOSICI: :[[@LINE-4]]:1: error: instruction not supported on this GPU // NOVI: :[[@LINE-5]]:20: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_ceil_f64 v[5:6], |src_shared_base| -// GFX9: v_ceil_f64_e64 v[5:6], |src_shared_base| ; encoding: [0x05,0x01,0x58,0xd1,0xeb,0x00,0x00,0x00] // GFX11: v_ceil_f64_e64 v[5:6], |src_shared_base| ; encoding: [0x05,0x01,0x98,0xd5,0xeb,0x00,0x00,0x00] // GFX12: v_ceil_f64_e64 v[5:6], |src_shared_base| ; encoding: [0x05,0x01,0x98,0xd5,0xeb,0x00,0x00,0x00] -// NOSI: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOCI: :[[@LINE-5]]:21: error: src_shared_base register not available on this GPU -// NOVI: :[[@LINE-6]]:21: error: src_shared_base register not available on this GPU -// NOGFX1250: :[[@LINE-7]]:12: error: invalid operand for instruction +// GFX9: v_ceil_f64_e64 v[5:6], |src_shared_base| ; encoding: [0x05,0x01,0x58,0xd1,0xeb,0x00,0x00,0x00] +// NOCI: :[[@LINE-4]]:21: error: src_shared_base register not available on this GPU +// NOGFX1250: :[[@LINE-5]]:12: error: invalid operand for instruction +// NOSI: :[[@LINE-6]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-7]]:21: error: src_shared_base register not available on this GPU // NOCIVI: :[[@LINE-5]]:21: error: src_shared_base register not available on this GPU v_ceil_f64 v[5:6], -src_shared_base -// GFX9: v_ceil_f64_e64 v[5:6], -src_shared_base ; encoding: [0x05,0x00,0x58,0xd1,0xeb,0x00,0x00,0x20] // GFX11: v_ceil_f64_e64 v[5:6], -src_shared_base ; encoding: [0x05,0x00,0x98,0xd5,0xeb,0x00,0x00,0x20] // GFX12: v_ceil_f64_e64 v[5:6], -src_shared_base ; encoding: [0x05,0x00,0x98,0xd5,0xeb,0x00,0x00,0x20] -// NOSI: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOCI: :[[@LINE-5]]:21: error: src_shared_base register not available on this GPU -// NOVI: :[[@LINE-6]]:21: error: src_shared_base register not available on this GPU -// NOGFX1250: :[[@LINE-7]]:12: error: invalid operand for instruction +// GFX9: v_ceil_f64_e64 v[5:6], -src_shared_base ; encoding: [0x05,0x00,0x58,0xd1,0xeb,0x00,0x00,0x20] +// NOCI: :[[@LINE-4]]:21: error: src_shared_base register not available on this GPU +// NOGFX1250: :[[@LINE-5]]:12: error: invalid operand for instruction +// NOSI: :[[@LINE-6]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-7]]:21: error: src_shared_base register not available on this GPU // NOCIVI: :[[@LINE-5]]:21: error: src_shared_base register not available on this GPU v_ceil_f32 v0, -src_shared_base +// GFX11: v_ceil_f32_e64 v0, -src_shared_base ; encoding: [0x00,0x00,0xa2,0xd5,0xeb,0x00,0x00,0x20] // GFX12XX: v_ceil_f32_e64 v0, -src_shared_base ; encoding: [0x00,0x00,0xa2,0xd5,0xeb,0x00,0x00,0x20] -// NOSICI: :[[@LINE-2]]:17: error: src_shared_base register not available on this GPU // GFX9: v_ceil_f32_e64 v0, -src_shared_base ; encoding: [0x00,0x00,0x5d,0xd1,0xeb,0x00,0x00,0x20] -// GFX11: v_ceil_f32_e64 v0, -src_shared_base ; encoding: [0x00,0x00,0xa2,0xd5,0xeb,0x00,0x00,0x20] +// NOSICI: :[[@LINE-4]]:17: error: src_shared_base register not available on this GPU // NOVI: :[[@LINE-5]]:17: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:17: error: src_shared_base register not available on this GPU v_ceil_f32 v0, |src_shared_base| +// GFX11: v_ceil_f32_e64 v0, |src_shared_base| ; encoding: 
[0x00,0x01,0xa2,0xd5,0xeb,0x00,0x00,0x00] // GFX12XX: v_ceil_f32_e64 v0, |src_shared_base| ; encoding: [0x00,0x01,0xa2,0xd5,0xeb,0x00,0x00,0x00] -// NOSICI: :[[@LINE-2]]:17: error: src_shared_base register not available on this GPU // GFX9: v_ceil_f32_e64 v0, |src_shared_base| ; encoding: [0x00,0x01,0x5d,0xd1,0xeb,0x00,0x00,0x00] -// GFX11: v_ceil_f32_e64 v0, |src_shared_base| ; encoding: [0x00,0x01,0xa2,0xd5,0xeb,0x00,0x00,0x00] +// NOSICI: :[[@LINE-4]]:17: error: src_shared_base register not available on this GPU // NOVI: :[[@LINE-5]]:17: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:17: error: src_shared_base register not available on this GPU v_ceil_f16_sdwa v5, |src_shared_base| dst_sel:DWORD dst_unused:UNUSED_PRESERVE -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU // GFX9: v_ceil_f16_sdwa v5, |src_shared_base| dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x8a,0x0a,0x7e,0xeb,0x16,0xa6,0x00] -// NOVI: :[[@LINE-3]]:22: error: src_shared_base register not available on this GPU -// NOGFX11: :[[@LINE-4]]:1: error: sdwa variant of this instruction is not supported -// NOGFX12: :[[@LINE-5]]:1: error: sdwa variant of this instruction is not supported -// NOGFX1250: :[[@LINE-6]]:1: error: sdwa variant of this instruction is not supported +// NOGFX11: :[[@LINE-2]]:1: error: sdwa variant of this instruction is not supported +// NOGFX12: :[[@LINE-3]]:1: error: sdwa variant of this instruction is not supported +// NOGFX1250: :[[@LINE-4]]:1: error: sdwa variant of this instruction is not supported +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-6]]:22: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_ceil_f16_sdwa v5, -src_shared_base dst_sel:DWORD dst_unused:UNUSED_PRESERVE -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU // GFX9: v_ceil_f16_sdwa v5, -src_shared_base dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x8a,0x0a,0x7e,0xeb,0x16,0x96,0x00] -// NOVI: :[[@LINE-3]]:22: error: src_shared_base register not available on this GPU -// NOGFX11: :[[@LINE-4]]:1: error: sdwa variant of this instruction is not supported -// NOGFX12: :[[@LINE-5]]:1: error: sdwa variant of this instruction is not supported -// NOGFX1250: :[[@LINE-6]]:1: error: sdwa variant of this instruction is not supported +// NOGFX11: :[[@LINE-2]]:1: error: sdwa variant of this instruction is not supported +// NOGFX12: :[[@LINE-3]]:1: error: sdwa variant of this instruction is not supported +// NOGFX1250: :[[@LINE-4]]:1: error: sdwa variant of this instruction is not supported +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-6]]:22: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_ceil_f32_sdwa v5, src_shared_base dst_sel:DWORD src0_sel:DWORD -// NOSICI: :[[@LINE-1]]:1: error: sdwa variant of this instruction is not supported // GFX9: v_ceil_f32_sdwa v5, src_shared_base dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x3a,0x0a,0x7e,0xeb,0x16,0x86,0x00] -// NOVI: :[[@LINE-3]]:21: error: src_shared_base register not available on this GPU -// NOGFX11: :[[@LINE-4]]:1: error: sdwa variant of this instruction is not supported -// NOGFX12: :[[@LINE-5]]:1: error: sdwa variant of this instruction is not supported -// 
NOGFX1250: :[[@LINE-6]]:1: error: sdwa variant of this instruction is not supported +// NOGFX11: :[[@LINE-2]]:1: error: sdwa variant of this instruction is not supported +// NOGFX12: :[[@LINE-3]]:1: error: sdwa variant of this instruction is not supported +// NOGFX1250: :[[@LINE-4]]:1: error: sdwa variant of this instruction is not supported +// NOSICI: :[[@LINE-5]]:1: error: sdwa variant of this instruction is not supported +// NOVI: :[[@LINE-6]]:21: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: sdwa variant of this instruction is not supported v_ceil_f32_sdwa v5, |src_shared_base| dst_sel:DWORD src0_sel:DWORD -// NOSICI: :[[@LINE-1]]:1: error: sdwa variant of this instruction is not supported // GFX9: v_ceil_f32_sdwa v5, |src_shared_base| dst_sel:DWORD dst_unused:UNUSED_PRESERVE src0_sel:DWORD ; encoding: [0xf9,0x3a,0x0a,0x7e,0xeb,0x16,0xa6,0x00] -// NOVI: :[[@LINE-3]]:22: error: src_shared_base register not available on this GPU -// NOGFX11: :[[@LINE-4]]:1: error: sdwa variant of this instruction is not supported -// NOGFX12: :[[@LINE-5]]:1: error: sdwa variant of this instruction is not supported -// NOGFX1250: :[[@LINE-6]]:1: error: sdwa variant of this instruction is not supported +// NOGFX11: :[[@LINE-2]]:1: error: sdwa variant of this instruction is not supported +// NOGFX12: :[[@LINE-3]]:1: error: sdwa variant of this instruction is not supported +// NOGFX1250: :[[@LINE-4]]:1: error: sdwa variant of this instruction is not supported +// NOSICI: :[[@LINE-5]]:1: error: sdwa variant of this instruction is not supported +// NOVI: :[[@LINE-6]]:22: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: sdwa variant of this instruction is not supported //---------------------------------------------------------------------------// @@ -1584,206 +1584,206 @@ v_ceil_f32_sdwa v5, |src_shared_base| dst_sel:DWORD src0_sel:DWORD //---------------------------------------------------------------------------// v_add_u32 v0, private_base, s0 -// GFX12XX: v_add_nc_u32_e64 v0, src_private_base, s0 ; encoding: [0x00,0x00,0x25,0xd5,0xed,0x00,0x00,0x00] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX11: v_add_nc_u32_e64 v0, src_private_base, s0 ; encoding: [0x00,0x00,0x25,0xd5,0xed,0x00,0x00,0x00] -// NOVI: :[[@LINE-4]]:15: error: src_private_base register not available on this GPU -// NOGFX9: :[[@LINE-5]]:29: error: invalid operand (violates constant bus restrictions) +// GFX12XX: v_add_nc_u32_e64 v0, src_private_base, s0 ; encoding: [0x00,0x00,0x25,0xd5,0xed,0x00,0x00,0x00] +// NOGFX9: :[[@LINE-3]]:29: error: invalid operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-5]]:15: error: src_private_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_add_u32 v0, scc, s0 -// GFX12XX: v_add_nc_u32_e64 v0, src_scc, s0 ; encoding: [0x00,0x00,0x25,0xd5,0xfd,0x00,0x00,0x00] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX11: v_add_nc_u32_e64 v0, src_scc, s0 ; encoding: [0x00,0x00,0x25,0xd5,0xfd,0x00,0x00,0x00] -// NOVI: :[[@LINE-4]]:1: error: operands are not valid for this GPU or mode -// NOGFX9: :[[@LINE-5]]:20: error: invalid operand (violates constant bus restrictions) +// GFX12XX: v_add_nc_u32_e64 v0, src_scc, s0 ; encoding: [0x00,0x00,0x25,0xd5,0xfd,0x00,0x00,0x00] +// NOGFX9: :[[@LINE-3]]:20: error: invalid 
operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-5]]:1: error: operands are not valid for this GPU or mode // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU // v_div_fmas implicitly reads VCC v_div_fmas_f32 v0, shared_base, v0, v1 -// GFX12XX: v_div_fmas_f32 v0, src_shared_base, v0, v1 ; encoding: [0x00,0x00,0x37,0xd6,0xeb,0x00,0x06,0x04] -// NOSICI: :[[@LINE-2]]:20: error: src_shared_base register not available on this GPU // GFX11: v_div_fmas_f32 v0, src_shared_base, v0, v1 ; encoding: [0x00,0x00,0x37,0xd6,0xeb,0x00,0x06,0x04] -// NOVI: :[[@LINE-4]]:20: error: src_shared_base register not available on this GPU -// NOGFX9: :[[@LINE-5]]:20: error: invalid operand (violates constant bus restrictions) +// GFX12XX: v_div_fmas_f32 v0, src_shared_base, v0, v1 ; encoding: [0x00,0x00,0x37,0xd6,0xeb,0x00,0x06,0x04] +// NOGFX9: :[[@LINE-3]]:20: error: invalid operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-4]]:20: error: src_shared_base register not available on this GPU +// NOVI: :[[@LINE-5]]:20: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:20: error: src_shared_base register not available on this GPU // v_div_fmas implicitly reads VCC v_div_fmas_f32 v0, v0, shared_limit, v1 -// GFX12XX: v_div_fmas_f32 v0, v0, src_shared_limit, v1 ; encoding: [0x00,0x00,0x37,0xd6,0x00,0xd9,0x05,0x04] -// NOSICI: :[[@LINE-2]]:24: error: src_shared_limit register not available on this GPU // GFX11: v_div_fmas_f32 v0, v0, src_shared_limit, v1 ; encoding: [0x00,0x00,0x37,0xd6,0x00,0xd9,0x05,0x04] -// NOVI: :[[@LINE-4]]:24: error: src_shared_limit register not available on this GPU -// NOGFX9: :[[@LINE-5]]:24: error: invalid operand (violates constant bus restrictions) +// GFX12XX: v_div_fmas_f32 v0, v0, src_shared_limit, v1 ; encoding: [0x00,0x00,0x37,0xd6,0x00,0xd9,0x05,0x04] +// NOGFX9: :[[@LINE-3]]:24: error: invalid operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-4]]:24: error: src_shared_limit register not available on this GPU +// NOVI: :[[@LINE-5]]:24: error: src_shared_limit register not available on this GPU // NOSICIVI: :[[@LINE-1]]:24: error: src_shared_limit register not available on this GPU // v_div_fmas implicitly reads VCC v_div_fmas_f32 v0, v0, v1, private_limit -// GFX12XX: v_div_fmas_f32 v0, v0, v1, src_private_limit ; encoding: [0x00,0x00,0x37,0xd6,0x00,0x03,0xba,0x03] -// NOSICI: :[[@LINE-2]]:28: error: src_private_limit register not available on this GPU // GFX11: v_div_fmas_f32 v0, v0, v1, src_private_limit ; encoding: [0x00,0x00,0x37,0xd6,0x00,0x03,0xba,0x03] -// NOVI: :[[@LINE-4]]:28: error: src_private_limit register not available on this GPU -// NOGFX9: :[[@LINE-5]]:28: error: invalid operand (violates constant bus restrictions) +// GFX12XX: v_div_fmas_f32 v0, v0, v1, src_private_limit ; encoding: [0x00,0x00,0x37,0xd6,0x00,0x03,0xba,0x03] +// NOGFX9: :[[@LINE-3]]:28: error: invalid operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-4]]:28: error: src_private_limit register not available on this GPU +// NOVI: :[[@LINE-5]]:28: error: src_private_limit register not available on this GPU // NOSICIVI: :[[@LINE-1]]:28: error: src_private_limit register not available on this GPU // v_div_fmas implicitly reads VCC v_div_fmas_f32 v0, execz, v0, v1 -// NOSICI: :[[@LINE-1]]:20: error: invalid operand (violates constant bus restrictions) -// NOGFX89: :[[@LINE-2]]:20: error: invalid operand (violates 
constant bus restrictions) -// NOGFX11: :[[@LINE-3]]:20: error: src_execz register not available on this GPU -// NOGFX12: :[[@LINE-4]]:20: error: src_execz register not available on this GPU -// NOGFX1250: :[[@LINE-5]]:20: error: src_execz register not available on this GPU +// NOGFX11: :[[@LINE-1]]:20: error: src_execz register not available on this GPU +// NOGFX12: :[[@LINE-2]]:20: error: src_execz register not available on this GPU +// NOGFX1250: :[[@LINE-3]]:20: error: src_execz register not available on this GPU +// NOGFX89: :[[@LINE-4]]:20: error: invalid operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-5]]:20: error: invalid operand (violates constant bus restrictions) // NOSICIVI: :[[@LINE-1]]:20: error: invalid operand (violates constant bus restrictions) // v_div_fmas implicitly reads VCC v_div_fmas_f32 v0, v0, scc, v1 +// GFX11: v_div_fmas_f32 v0, v0, src_scc, v1 ; encoding: [0x00,0x00,0x37,0xd6,0x00,0xfb,0x05,0x04] // GFX12XX: v_div_fmas_f32 v0, v0, src_scc, v1 ; encoding: [0x00,0x00,0x37,0xd6,0x00,0xfb,0x05,0x04] -// NOSICI: :[[@LINE-2]]:24: error: invalid operand (violates constant bus restrictions) // NOGFX89: :[[@LINE-3]]:24: error: invalid operand (violates constant bus restrictions) -// GFX11: v_div_fmas_f32 v0, v0, src_scc, v1 ; encoding: [0x00,0x00,0x37,0xd6,0x00,0xfb,0x05,0x04] +// NOSICI: :[[@LINE-4]]:24: error: invalid operand (violates constant bus restrictions) // NOSICIVI: :[[@LINE-1]]:24: error: invalid operand (violates constant bus restrictions) // v_div_fmas implicitly reads VCC v_div_fmas_f32 v0, v0, v1, vccz -// NOSICI: :[[@LINE-1]]:28: error: invalid operand (violates constant bus restrictions) -// NOGFX89: :[[@LINE-2]]:28: error: invalid operand (violates constant bus restrictions) -// NOGFX11: :[[@LINE-3]]:28: error: src_vccz register not available on this GPU -// NOGFX12: :[[@LINE-4]]:28: error: src_vccz register not available on this GPU -// NOGFX1250: :[[@LINE-5]]:28: error: src_vccz register not available on this GPU +// NOGFX11: :[[@LINE-1]]:28: error: src_vccz register not available on this GPU +// NOGFX12: :[[@LINE-2]]:28: error: src_vccz register not available on this GPU +// NOGFX1250: :[[@LINE-3]]:28: error: src_vccz register not available on this GPU +// NOGFX89: :[[@LINE-4]]:28: error: invalid operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-5]]:28: error: invalid operand (violates constant bus restrictions) // NOSICIVI: :[[@LINE-1]]:28: error: invalid operand (violates constant bus restrictions) // v_addc_co_u32 implicitly reads VCC (VOP2) v_addc_co_u32 v0, vcc, shared_base, v0, vcc -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOVI: :[[@LINE-2]]:1: error: instruction not supported on this GPU -// NOGFX9: :[[@LINE-3]]:24: error: invalid operand (violates constant bus restrictions) -// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-6]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-1]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX9: :[[@LINE-4]]:24: error: invalid operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-6]]:1: error: instruction not supported on this GPU // NOSICIVI: :[[@LINE-1]]:1: 
error: instruction not supported on this GPU v_madak_f32 v0, shared_base, v0, 0x11213141 -// NOSICI: :[[@LINE-1]]:17: error: src_shared_base register not available on this GPU -// NOVI: :[[@LINE-2]]:17: error: src_shared_base register not available on this GPU -// NOGFX9: :[[@LINE-3]]:17: error: invalid operand (violates constant bus restrictions) -// NOGFX11: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-5]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-6]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-1]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX9: :[[@LINE-4]]:17: error: invalid operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-5]]:17: error: src_shared_base register not available on this GPU +// NOVI: :[[@LINE-6]]:17: error: src_shared_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:17: error: src_shared_base register not available on this GPU v_madak_f32 v0, scc, v0, 0x11213141 -// NOSICI: :[[@LINE-1]]:17: error: invalid operand (violates constant bus restrictions) -// NOGFX89: :[[@LINE-2]]:17: error: invalid operand (violates constant bus restrictions) -// NOGFX11: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-1]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:17: error: invalid operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-5]]:17: error: invalid operand (violates constant bus restrictions) // NOSICIVI: :[[@LINE-1]]:17: error: invalid operand (violates constant bus restrictions) v_madak_f32 v0, 0xff32ff, v0, 0x11213141 -// NOSICI: :[[@LINE-1]]:31: error: only one unique literal operand is allowed -// NOGFX89: :[[@LINE-2]]:31: error: only one unique literal operand is allowed -// NOGFX11: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-1]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:31: error: only one unique literal operand is allowed +// NOSICI: :[[@LINE-5]]:31: error: only one unique literal operand is allowed // NOSICIVI: :[[@LINE-1]]:31: error: only one unique literal operand is allowed v_madak_f32 v0, 0xff32ff, v0, 1 -// NOSICI: :[[@LINE-1]]:31: error: only one unique literal operand is allowed -// NOGFX89: :[[@LINE-2]]:31: error: only one unique literal operand is allowed -// NOGFX11: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-1]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX1250: 
:[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:31: error: only one unique literal operand is allowed +// NOSICI: :[[@LINE-5]]:31: error: only one unique literal operand is allowed // NOSICIVI: :[[@LINE-1]]:31: error: only one unique literal operand is allowed v_madmk_f32 v0, 0xff32ff, 0x11213141, v0 -// NOSICI: :[[@LINE-1]]:27: error: only one unique literal operand is allowed -// NOGFX89: :[[@LINE-2]]:27: error: only one unique literal operand is allowed -// NOGFX11: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-1]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:27: error: only one unique literal operand is allowed +// NOSICI: :[[@LINE-5]]:27: error: only one unique literal operand is allowed // NOSICIVI: :[[@LINE-1]]:27: error: only one unique literal operand is allowed v_madmk_f32 v0, 0xff32ff, -1, v0 -// NOSICI: :[[@LINE-1]]:27: error: only one unique literal operand is allowed -// NOGFX89: :[[@LINE-2]]:27: error: only one unique literal operand is allowed -// NOGFX11: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-1]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:27: error: only one unique literal operand is allowed +// NOSICI: :[[@LINE-5]]:27: error: only one unique literal operand is allowed // NOSICIVI: :[[@LINE-1]]:27: error: only one unique literal operand is allowed v_madak_f16 v0, 0xff32, v0, 0x1122 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:29: error: only one unique literal operand is allowed -// NOGFX11: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-1]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:29: error: only one unique literal operand is allowed +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_madak_f16 v0, 0xff32, v0, 0 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:29: error: only one unique literal operand is allowed -// NOGFX11: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-1]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on 
this GPU +// NOGFX89: :[[@LINE-4]]:29: error: only one unique literal operand is allowed +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_madmk_f16 v0, 0xff32, 0x1122, v0 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:25: error: only one unique literal operand is allowed -// NOGFX11: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-1]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:25: error: only one unique literal operand is allowed +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_madmk_f16 v0, 0xff32, 1, v0 -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOGFX89: :[[@LINE-2]]:25: error: only one unique literal operand is allowed -// NOGFX11: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-1]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:25: error: only one unique literal operand is allowed +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_cmp_eq_f32 s[0:1], private_base, private_limit -// NOSICI: :[[@LINE-1]]:22: error: src_private_base register not available on this GPU -// NOVI: :[[@LINE-2]]:22: error: src_private_base register not available on this GPU -// NOGFX9: :[[@LINE-3]]:36: error: invalid operand (violates constant bus restrictions) -// NOGFX11: :[[@LINE-4]]:14: error: invalid operand for instruction -// NOGFX12: :[[@LINE-5]]:14: error: invalid operand for instruction -// NOGFX1250: :[[@LINE-6]]:14: error: invalid operand for instruction +// NOGFX11: :[[@LINE-1]]:14: error: invalid operand for instruction +// NOGFX12: :[[@LINE-2]]:14: error: invalid operand for instruction +// NOGFX1250: :[[@LINE-3]]:14: error: invalid operand for instruction +// NOGFX9: :[[@LINE-4]]:36: error: invalid operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-5]]:22: error: src_private_base register not available on this GPU +// NOVI: :[[@LINE-6]]:22: error: src_private_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:22: error: src_private_base register not available on this GPU v_cmp_eq_f32 s[0:1], private_base, s0 -// NOSICI: :[[@LINE-1]]:22: error: src_private_base register not available on this GPU -// NOVI: :[[@LINE-2]]:22: error: src_private_base register not available on this GPU -// NOGFX9: :[[@LINE-3]]:36: error: invalid operand (violates constant bus restrictions) -// NOGFX11: :[[@LINE-4]]:14: error: invalid operand for instruction -// NOGFX12: :[[@LINE-5]]:14: error: invalid operand for instruction -// NOGFX1250: :[[@LINE-6]]:14: error: invalid operand for instruction +// 
NOGFX11: :[[@LINE-1]]:14: error: invalid operand for instruction +// NOGFX12: :[[@LINE-2]]:14: error: invalid operand for instruction +// NOGFX1250: :[[@LINE-3]]:14: error: invalid operand for instruction +// NOGFX9: :[[@LINE-4]]:36: error: invalid operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-5]]:22: error: src_private_base register not available on this GPU +// NOVI: :[[@LINE-6]]:22: error: src_private_base register not available on this GPU // NOSICIVI: :[[@LINE-1]]:22: error: src_private_base register not available on this GPU v_cmp_eq_f32 s[0:1], execz, s0 -// NOSICI: :[[@LINE-1]]:29: error: invalid operand (violates constant bus restrictions) -// NOGFX89: :[[@LINE-2]]:29: error: invalid operand (violates constant bus restrictions) -// NOGFX11: :[[@LINE-3]]:22: error: src_execz register not available on this GPU -// NOGFX12: :[[@LINE-4]]:22: error: src_execz register not available on this GPU -// NOGFX1250: :[[@LINE-5]]:22: error: src_execz register not available on this GPU +// NOGFX11: :[[@LINE-1]]:22: error: src_execz register not available on this GPU +// NOGFX12: :[[@LINE-2]]:22: error: src_execz register not available on this GPU +// NOGFX1250: :[[@LINE-3]]:22: error: src_execz register not available on this GPU +// NOGFX89: :[[@LINE-4]]:29: error: invalid operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-5]]:29: error: invalid operand (violates constant bus restrictions) // NOSICIVI: :[[@LINE-1]]:29: error: invalid operand (violates constant bus restrictions) v_pk_add_f16 v255, private_base, private_limit -// GFX12XX: v_pk_add_f16 v255, src_private_base, src_private_limit ; encoding: [0xff,0x40,0x0f,0xcc,0xed,0xdc,0x01,0x18] -// NOSICI: :[[@LINE-2]]:1: error: instruction not supported on this GPU // GFX11: v_pk_add_f16 v255, src_private_base, src_private_limit ; encoding: [0xff,0x40,0x0f,0xcc,0xed,0xdc,0x01,0x18] -// NOVI: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX9: :[[@LINE-5]]:34: error: invalid operand (violates constant bus restrictions) +// GFX12XX: v_pk_add_f16 v255, src_private_base, src_private_limit ; encoding: [0xff,0x40,0x0f,0xcc,0xed,0xdc,0x01,0x18] +// NOGFX9: :[[@LINE-3]]:34: error: invalid operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-5]]:1: error: instruction not supported on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU v_pk_add_f16 v255, vccz, execz -// NOSICI: :[[@LINE-1]]:1: error: instruction not supported on this GPU -// NOVI: :[[@LINE-2]]:1: error: instruction not supported on this GPU -// NOGFX9: :[[@LINE-3]]:26: error: invalid operand (violates constant bus restrictions) -// NOGFX11: :[[@LINE-4]]:20: error: src_vccz register not available on this GPU -// NOGFX12: :[[@LINE-5]]:20: error: src_vccz register not available on this GPU -// NOGFX1250: :[[@LINE-6]]:20: error: src_vccz register not available on this GPU +// NOGFX11: :[[@LINE-1]]:20: error: src_vccz register not available on this GPU +// NOGFX12: :[[@LINE-2]]:20: error: src_vccz register not available on this GPU +// NOGFX1250: :[[@LINE-3]]:20: error: src_vccz register not available on this GPU +// NOGFX9: :[[@LINE-4]]:26: error: invalid operand (violates constant bus restrictions) +// NOSICI: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOVI: :[[@LINE-6]]:1: error: instruction not supported on this GPU // NOSICIVI: :[[@LINE-1]]:1: error: instruction not supported on this GPU 
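The NOGFX9 "invalid operand (violates constant bus restrictions)" checks above all reduce to the same hardware limit: before GFX10, a VALU instruction may read the constant bus (SGPRs, literal constants, and inline special operands such as src_shared_base, scc, execz, or vccz) at most once, and v_div_fmas_f32 already spends that single read on its implicit VCC use, whereas GFX11 and GFX12 allow two constant-bus reads and therefore accept the same operand combinations. A minimal illustration of the rule, using a hypothetical pair of VOP3 adds rather than lines taken from this test:

v_add_f32_e64 v0, s0, v1  // one constant-bus read (s0): accepted on GFX9
v_add_f32_e64 v0, s0, s1  // two constant-bus reads (s0, s1): rejected on GFX9 as a
                          // constant bus violation, accepted on GFX10 and later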
//---------------------------------------------------------------------------// @@ -1791,36 +1791,36 @@ v_pk_add_f16 v255, vccz, execz //---------------------------------------------------------------------------// v_sqrt_f32 v2, lit(123) -// SICI: v_sqrt_f32_e32 v2, lit(0x7b) ; encoding: [0xff,0x66,0x04,0x7e,0x7b,0x00,0x00,0x00] -// GFX89: v_sqrt_f32_e32 v2, lit(0x7b) ; encoding: [0xff,0x4e,0x04,0x7e,0x7b,0x00,0x00,0x00] -// GFX12XX: v_sqrt_f32_e32 v2, lit(0x7b) ; encoding: [0xff,0x66,0x04,0x7e,0x7b,0x00,0x00,0x00] // GFX11: v_sqrt_f32_e32 v2, lit(0x7b) ; encoding: [0xff,0x66,0x04,0x7e,0x7b,0x00,0x00,0x00] +// GFX12XX: v_sqrt_f32_e32 v2, lit(0x7b) ; encoding: [0xff,0x66,0x04,0x7e,0x7b,0x00,0x00,0x00] +// GFX89: v_sqrt_f32_e32 v2, lit(0x7b) ; encoding: [0xff,0x4e,0x04,0x7e,0x7b,0x00,0x00,0x00] +// SICI: v_sqrt_f32_e32 v2, lit(0x7b) ; encoding: [0xff,0x66,0x04,0x7e,0x7b,0x00,0x00,0x00] v_sqrt_f32 v2, abs(lit(123)) -// SICI: v_sqrt_f32_e32 v2, lit(0x7b) ; encoding: [0xff,0x66,0x04,0x7e,0x7b,0x00,0x00,0x00] -// GFX89: v_sqrt_f32_e32 v2, lit(0x7b) ; encoding: [0xff,0x4e,0x04,0x7e,0x7b,0x00,0x00,0x00] -// GFX12XX: v_sqrt_f32_e32 v2, lit(0x7b) ; encoding: [0xff,0x66,0x04,0x7e,0x7b,0x00,0x00,0x00] // GFX11: v_sqrt_f32_e32 v2, lit(0x7b) ; encoding: [0xff,0x66,0x04,0x7e,0x7b,0x00,0x00,0x00] +// GFX12XX: v_sqrt_f32_e32 v2, lit(0x7b) ; encoding: [0xff,0x66,0x04,0x7e,0x7b,0x00,0x00,0x00] +// GFX89: v_sqrt_f32_e32 v2, lit(0x7b) ; encoding: [0xff,0x4e,0x04,0x7e,0x7b,0x00,0x00,0x00] +// SICI: v_sqrt_f32_e32 v2, lit(0x7b) ; encoding: [0xff,0x66,0x04,0x7e,0x7b,0x00,0x00,0x00] v_sqrt_f32 v2, lit(123.0) -// SICI: v_sqrt_f32_e32 v2, lit(0x42f60000) ; encoding: [0xff,0x66,0x04,0x7e,0x00,0x00,0xf6,0x42] -// GFX89: v_sqrt_f32_e32 v2, lit(0x42f60000) ; encoding: [0xff,0x4e,0x04,0x7e,0x00,0x00,0xf6,0x42] -// GFX12XX: v_sqrt_f32_e32 v2, lit(0x42f60000) ; encoding: [0xff,0x66,0x04,0x7e,0x00,0x00,0xf6,0x42] // GFX11: v_sqrt_f32_e32 v2, lit(0x42f60000) ; encoding: [0xff,0x66,0x04,0x7e,0x00,0x00,0xf6,0x42] +// GFX12XX: v_sqrt_f32_e32 v2, lit(0x42f60000) ; encoding: [0xff,0x66,0x04,0x7e,0x00,0x00,0xf6,0x42] +// GFX89: v_sqrt_f32_e32 v2, lit(0x42f60000) ; encoding: [0xff,0x4e,0x04,0x7e,0x00,0x00,0xf6,0x42] +// SICI: v_sqrt_f32_e32 v2, lit(0x42f60000) ; encoding: [0xff,0x66,0x04,0x7e,0x00,0x00,0xf6,0x42] v_sqrt_f64 v[2:3], lit(123.0) -// SICI: v_sqrt_f64_e32 v[2:3], lit(0x405ec000) ; encoding: [0xff,0x68,0x04,0x7e,0x00,0xc0,0x5e,0x40] -// GFX89: v_sqrt_f64_e32 v[2:3], lit(0x405ec000) ; encoding: [0xff,0x50,0x04,0x7e,0x00,0xc0,0x5e,0x40] // GFX11: v_sqrt_f64_e32 v[2:3], lit(0x405ec000) ; encoding: [0xff,0x68,0x04,0x7e,0x00,0xc0,0x5e,0x40] // GFX12: v_sqrt_f64_e32 v[2:3], lit(0x405ec000) ; encoding: [0xff,0x68,0x04,0x7e,0x00,0xc0,0x5e,0x40] // GFX1250: v_sqrt_f64_e32 v[2:3], lit(0x405ec000) ; encoding: [0xfe,0x68,0x04,0x7e,0x00,0xc0,0x5e,0x40,0x00,0x00,0x00,0x00] +// GFX89: v_sqrt_f64_e32 v[2:3], lit(0x405ec000) ; encoding: [0xff,0x50,0x04,0x7e,0x00,0xc0,0x5e,0x40] +// SICI: v_sqrt_f64_e32 v[2:3], lit(0x405ec000) ; encoding: [0xff,0x68,0x04,0x7e,0x00,0xc0,0x5e,0x40] v_sqrt_f64 v[2:3], lit(123) -// SICI: v_sqrt_f64_e32 v[2:3], lit(0x7b) ; encoding: [0xff,0x68,0x04,0x7e,0x7b,0x00,0x00,0x00] -// GFX89: v_sqrt_f64_e32 v[2:3], lit(0x7b) ; encoding: [0xff,0x50,0x04,0x7e,0x7b,0x00,0x00,0x00] // GFX11: v_sqrt_f64_e32 v[2:3], lit(0x7b) ; encoding: [0xff,0x68,0x04,0x7e,0x7b,0x00,0x00,0x00] // GFX12: v_sqrt_f64_e32 v[2:3], lit(0x7b) ; encoding: [0xff,0x68,0x04,0x7e,0x7b,0x00,0x00,0x00] // GFX1250: v_sqrt_f64_e32 v[2:3], lit(0x7b) ; 
encoding: [0xfe,0x68,0x04,0x7e,0x7b,0x00,0x00,0x00,0x00,0x00,0x00,0x00] +// GFX89: v_sqrt_f64_e32 v[2:3], lit(0x7b) ; encoding: [0xff,0x50,0x04,0x7e,0x7b,0x00,0x00,0x00] +// SICI: v_sqrt_f64_e32 v[2:3], lit(0x7b) ; encoding: [0xff,0x68,0x04,0x7e,0x7b,0x00,0x00,0x00] v_sqrt_f32 v2, lit 123.0 // NOGCN: :[[@LINE-1]]:20: error: expected left paren after lit @@ -1834,16 +1834,16 @@ v_sqrt_f32 v2, lit(v1) // Make sure lit() is accepted on operands without modifiers. v_madak_f32 v4, lit(0x7e8), v8, lit(0x7e8) -// SICI: v_madak_f32 v4, lit(0x7e8), v8, lit(0x7e8) ; encoding: [0xff,0x10,0x08,0x42,0xe8,0x07,0x00,0x00] // GFX89: v_madak_f32 v4, lit(0x7e8), v8, lit(0x7e8) ; encoding: [0xff,0x10,0x08,0x30,0xe8,0x07,0x00,0x00] -// NOGFX11: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-4]]:1: error: instruction not supported on this GPU +// SICI: v_madak_f32 v4, lit(0x7e8), v8, lit(0x7e8) ; encoding: [0xff,0x10,0x08,0x42,0xe8,0x07,0x00,0x00] v_madak_f32 v4, lit(lit(0x7e8)), v8, lit(0x7e8) -// NOSICI: :[[@LINE-1]]:24: error: not a valid operand. -// NOGFX89: :[[@LINE-2]]:24: error: not a valid operand. -// NOGFX11: :[[@LINE-3]]:1: error: instruction not supported on this GPU -// NOGFX12: :[[@LINE-4]]:1: error: instruction not supported on this GPU -// NOGFX1250: :[[@LINE-5]]:1: error: instruction not supported on this GPU +// NOGFX11: :[[@LINE-1]]:1: error: instruction not supported on this GPU +// NOGFX12: :[[@LINE-2]]:1: error: instruction not supported on this GPU +// NOGFX1250: :[[@LINE-3]]:1: error: instruction not supported on this GPU +// NOGFX89: :[[@LINE-4]]:24: error: not a valid operand. +// NOSICI: :[[@LINE-5]]:24: error: not a valid operand. // NOSICIVI: :[[@LINE-1]]:24: error: not a valid operand. 
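The bulk of the hunks above simply reorder the per-target check prefixes and renumber their FileCheck [[@LINE-n]] expressions: [[@LINE-n]] expands to the line number of the check directive itself minus n, so each diagnostic check must subtract exactly its distance from the instruction it describes, and moving a check line up or down changes that distance. A short hypothetical fragment (placeholder mnemonic and prefixes, not taken from the patch) shows the convention:

v_bogus_f32 v0, v1
// NOGPU-A: :[[@LINE-1]]:1: error: instruction not supported on this GPU
// NOGPU-B: :[[@LINE-2]]:1: error: instruction not supported on this GPU

Both directives point at the same instruction line; the first sits one line below it and the second two lines below, which is why reordering the prefixes in the test requires bumping the offsets (for example from [[@LINE-2]] to [[@LINE-4]] when two other check lines move in between).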
diff --git a/llvm/test/MC/RISCV/xsfvfexp.s b/llvm/test/MC/RISCV/xsfvfexp.s new file mode 100644 index 0000000..bd6aecd --- /dev/null +++ b/llvm/test/MC/RISCV/xsfvfexp.s @@ -0,0 +1,29 @@ +# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+xsfvfexp32e %s \ +# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST +# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \ +# RUN: | FileCheck %s --check-prefix=CHECK-ERROR +# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+xsfvfexp32e %s \ +# RUN: | llvm-objdump -d --mattr=+xsfvfexp32e - \ +# RUN: | FileCheck %s --check-prefix=CHECK-INST +# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+xsfvfexp32e %s \ +# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN +# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+xsfvfexp16e %s \ +# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST +# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+xsfvfexp16e %s \ +# RUN: | llvm-objdump -d --mattr=+xsfvfexp16e - \ +# RUN: | FileCheck %s --check-prefix=CHECK-INST +# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+xsfvfexp16e %s \ +# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN +# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+zvfbfmin,+xsfvfbfexp16e %s \ +# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST +# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zvfbfmin,+xsfvfbfexp16e %s \ +# RUN: | llvm-objdump -d --mattr=+xsfvfbfexp16e - \ +# RUN: | FileCheck %s --check-prefix=CHECK-INST +# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+zvfbfmin,+xsfvfbfexp16e %s \ +# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN + +sf.vfexp.v v2, v5, v0.t +# CHECK-INST: sf.vfexp.v v2, v5, v0.t +# CHECK-ENCODING: [0x57,0x91,0x53,0x4c] +# CHECK-ERROR: instruction requires the following: 'Xsfvfbfexp16e', 'Xsfvfexp16e', or 'Xsfvfexp32e' (SiFive Vector Floating-Point Exponential Function Instruction){{$}} +# CHECK-UNKNOWN: 4c539157 <unknown> diff --git a/llvm/test/MC/RISCV/xsfvfexpa.s b/llvm/test/MC/RISCV/xsfvfexpa.s new file mode 100644 index 0000000..317a103 --- /dev/null +++ b/llvm/test/MC/RISCV/xsfvfexpa.s @@ -0,0 +1,15 @@ +# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+xsfvfexpa %s \ +# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST +# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \ +# RUN: | FileCheck %s --check-prefix=CHECK-ERROR +# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+xsfvfexpa %s \ +# RUN: | llvm-objdump -d --mattr=+xsfvfexpa - \ +# RUN: | FileCheck %s --check-prefix=CHECK-INST +# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+xsfvfexpa %s \ +# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN + +sf.vfexpa.v v2, v5, v0.t +# CHECK-INST: sf.vfexpa.v v2, v5, v0.t +# CHECK-ENCODING: [0x57,0x11,0x53,0x4c] +# CHECK-ERROR: instruction requires the following: 'Xsfvfexpa' (SiFive Vector Floating-Point Exponential Approximation Instruction){{$}} +# CHECK-UNKNOWN: 4c531157 <unknown> diff --git a/llvm/test/ThinLTO/AArch64/aarch64_inline.ll b/llvm/test/ThinLTO/AArch64/aarch64_inline.ll new file mode 100644 index 0000000..401f66d --- /dev/null +++ b/llvm/test/ThinLTO/AArch64/aarch64_inline.ll @@ -0,0 +1,86 @@ +;; Test verifies that inlining happens cross-module when module flags are upgraded. +;; `foo` and `main` both use the old semantics, while `bar` uses the new semantics.
+;; Regression test for #82763 + +; RUN: split-file %s %t +; RUN: opt -module-summary %t/foo.ll -o %t/foo.o +; RUN: opt -module-summary %t/bar.ll -o %t/bar.o +; RUN: opt -module-summary %t/main.ll -o %t/main.o +; RUN: llvm-lto2 run %t/main.o %t/foo.o %t/bar.o -save-temps \ +; RUN: -o %t/t.exe \ +; RUN: -r=%t/foo.o,foo,plx \ +; RUN: -r=%t/bar.o,bar,plx \ +; RUN: -r=%t/main.o,foo,l \ +; RUN: -r=%t/main.o,bar,l \ +; RUN: -r=%t/main.o,main,plx 2>&1 +; RUN: llvm-dis %t/t.exe.1.4.opt.bc -o - | FileCheck %s + +; CHECK: define dso_local noundef i32 @main() local_unnamed_addr #0 { +; CHECK-NEXT: entry: +; CHECK-NEXT: ret i32 35 +; CHECK-NEXT: } + +; CHECK: attributes #0 = { {{.*}}"branch-target-enforcement" "sign-return-address"="all" "sign-return-address-key"="b_key" } + +; CHECK: !llvm.module.flags = !{!0, !1, !2, !3} + +; CHECK: !0 = !{i32 8, !"branch-target-enforcement", i32 2} +; CHECK: !1 = !{i32 8, !"sign-return-address", i32 2} +; CHECK: !2 = !{i32 8, !"sign-return-address-all", i32 2} +; CHECK: !3 = !{i32 8, !"sign-return-address-with-bkey", i32 2} + + +;--- foo.ll +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-unknown-linux-gnu" + +define dso_local noundef i32 @foo() local_unnamed_addr #0 { +entry: + ret i32 34 +} + +attributes #0 = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) } +!llvm.module.flags = !{!0, !1, !2, !3 } +!0 = !{i32 8, !"branch-target-enforcement", i32 1} +!1 = !{i32 8, !"sign-return-address", i32 1} +!2 = !{i32 8, !"sign-return-address-all", i32 1} +!3 = !{i32 8, !"sign-return-address-with-bkey", i32 1} + +;--- bar.ll +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-unknown-linux-gnu" + +define dso_local noundef i32 @bar() local_unnamed_addr #0 { +entry: + ret i32 1 +} + +attributes #0 = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) "branch-target-enforcement" "sign-return-address"="all" "sign-return-address-key"="b_key" } +!llvm.module.flags = !{!0, !1, !2, !3 } +!0 = !{i32 8, !"branch-target-enforcement", i32 2} +!1 = !{i32 8, !"sign-return-address", i32 2} +!2 = !{i32 8, !"sign-return-address-all", i32 2} +!3 = !{i32 8, !"sign-return-address-with-bkey", i32 2} + +;--- main.ll +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-unknown-linux-gnu" + +declare i32 @foo(); +declare i32 @bar(); + +define i32 @main() #0 { +entry: + %1 = call i32 @foo() + %2 = call i32 @bar() + %3 = add i32 %1, %2 + ret i32 %3 +} + +attributes #0 = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) } + +!llvm.module.flags = !{!0, !1, !2, !3 } +!0 = !{i32 8, !"branch-target-enforcement", i32 1} +!1 = !{i32 8, !"sign-return-address", i32 1} +!2 = !{i32 8, !"sign-return-address-all", i32 1} +!3 = !{i32 8, !"sign-return-address-with-bkey", i32 1} diff --git a/llvm/test/Transforms/DeadStoreElimination/matrix-intrinsics.ll b/llvm/test/Transforms/DeadStoreElimination/matrix-intrinsics.ll new file mode 100644 index 0000000..ae3c746 --- /dev/null +++ b/llvm/test/Transforms/DeadStoreElimination/matrix-intrinsics.ll @@ -0,0 +1,338 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -passes=dse -S %s | FileCheck %s + +define void @dead_unstrided_store_non_matrix_load(ptr noalias %src, ptr noalias %dst) { +; CHECK-LABEL: define void @dead_unstrided_store_non_matrix_load( +; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias 
[[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L:%.*]] = load double, ptr [[SRC]], align 8 +; CHECK-NEXT: ret void +; +entry: + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr %dst, i32 4, i1 false, i32 4, i32 2) + %l = load double, ptr %src + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr %dst, i32 4, i1 false, i32 4, i32 2) + ret void +} + +define void @live_unstrided_store_non_matrix_load(ptr noalias %src, ptr noalias %dst) { +; CHECK-LABEL: define void @live_unstrided_store_non_matrix_load( +; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[L_1:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L_2:%.*]] = load double, ptr [[DST]], align 8 +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L_1]], ptr [[DST]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: ret void +; +entry: + %l.1 = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 4, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr %dst, i32 4, i1 false, i32 4, i32 2) + %l.2 = load double, ptr %dst + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l.1, ptr %dst, i32 4, i1 false, i32 4, i32 2) + ret void +} + +define void @dead_strided_store(ptr noalias %src, ptr noalias %dst) { +; CHECK-LABEL: define void @dead_strided_store( +; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 100, i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 200, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[DST]], i32 100, i1 false, i32 4, i32 2) +; CHECK-NEXT: ret void +; +entry: + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr %dst, i32 100, i1 false, i32 4, i32 2) + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 200, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l, ptr %dst, i32 100, i1 false, i32 4, i32 2) + ret void +} + +define void @live_strided_store(ptr %ptr) { +; CHECK-LABEL: define void @live_strided_store( +; CHECK-SAME: ptr [[PTR:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[PTR]], i32 100, i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[PTR]], i32 200, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[PTR]], i32 100, i1 false, i32 4, i32 2) +; CHECK-NEXT: ret void +; +entry: + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr %ptr, i32 100, i1 false, i32 4, i32 2) + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %ptr, 
i32 200, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l, ptr %ptr, i32 100, i1 false, i32 4, i32 2) + ret void +} + +define void @dead_strided_store_non_matrix_load(ptr noalias %src, ptr noalias %dst) { +; CHECK-LABEL: define void @dead_strided_store_non_matrix_load( +; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[L_1:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 100, i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L:%.*]] = load double, ptr [[SRC]], align 8 +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L_1]], ptr [[DST]], i32 100, i1 false, i32 4, i32 2) +; CHECK-NEXT: ret void +; +entry: + %l.1 = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 4, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr %dst, i32 100, i1 false, i32 4, i32 2) + %l.2 = load double, ptr %src + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l.1, ptr %dst, i32 100, i1 false, i32 4, i32 2) + ret void +} + +define void @live_strided_store_non_matrix_load(ptr noalias %src, ptr noalias %dst) { +; CHECK-LABEL: define void @live_strided_store_non_matrix_load( +; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[L_1:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 100, i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L_2:%.*]] = load double, ptr [[DST]], align 8 +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L_1]], ptr [[DST]], i32 100, i1 false, i32 4, i32 2) +; CHECK-NEXT: ret void +; +entry: + %l.1 = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 4, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr %dst, i32 100, i1 false, i32 4, i32 2) + %l.2 = load double, ptr %dst + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l.1, ptr %dst, i32 100, i1 false, i32 4, i32 2) + ret void +} + +define void @dead_dynamically_strided_store(ptr noalias %src, ptr noalias %dst, i32 %stride) { +; CHECK-LABEL: define void @dead_dynamically_strided_store( +; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[STRIDE:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 [[STRIDE]], i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[DST]], i32 [[STRIDE]], i1 false, i32 4, i32 2) +; CHECK-NEXT: ret void +; +entry: + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr %dst, i32 %stride, i1 false, i32 4, i32 2) + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 4, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l, ptr %dst, i32 %stride, i1 false, i32 4, i32 2) 
+ ret void +} + +define void @live_dynamically_strided_store(ptr %ptr, i32 %stride) { +; CHECK-LABEL: define void @live_dynamically_strided_store( +; CHECK-SAME: ptr [[PTR:%.*]], i32 [[STRIDE:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[PTR]], i32 [[STRIDE]], i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[PTR]], i32 [[STRIDE]], i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[PTR]], i32 [[STRIDE]], i1 false, i32 4, i32 2) +; CHECK-NEXT: ret void +; +entry: + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr %ptr, i32 %stride, i1 false, i32 4, i32 2) + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %ptr, i32 %stride, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l, ptr %ptr, i32 %stride, i1 false, i32 4, i32 2) + ret void +} + +define void @dead_dynamically_strided_store_non_matrix_load(ptr noalias %src, ptr noalias %dst, i32 %stride) { +; CHECK-LABEL: define void @dead_dynamically_strided_store_non_matrix_load( +; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[STRIDE:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[L_1:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 [[STRIDE]], i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L:%.*]] = load double, ptr [[SRC]], align 8 +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L_1]], ptr [[DST]], i32 [[STRIDE]], i1 false, i32 4, i32 2) +; CHECK-NEXT: ret void +; +entry: + %l.1 = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 4, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr %dst, i32 %stride, i1 false, i32 4, i32 2) + %l.2 = load double, ptr %src + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l.1, ptr %dst, i32 %stride, i1 false, i32 4, i32 2) + ret void +} + +define void @live_dynamically_strided_store_non_matrix_load(ptr noalias %src, ptr noalias %dst, i32 %stride) { +; CHECK-LABEL: define void @live_dynamically_strided_store_non_matrix_load( +; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[STRIDE:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[L_1:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 [[STRIDE]], i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L_2:%.*]] = load double, ptr [[DST]], align 8 +; CHECK-NEXT: ret void +; +entry: + %l.1 = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 4, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr %dst, i32 %stride, i1 false, i32 4, i32 2) + %l.2 = load double, ptr %dst + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr %dst, i32 %stride, i1 false, i32 4, i32 2) + ret void +} + +define void @dead_unstrided_store(ptr noalias %src, ptr noalias %dst) { +; CHECK-LABEL: define void @dead_unstrided_store( +; CHECK-SAME: ptr 
noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[DST]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: ret void +; +entry: + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr %dst, i32 4, i1 false, i32 4, i32 2) + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 4, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l, ptr %dst, i32 4, i1 false, i32 4, i32 2) + ret void +} + +define void @live_unstrided_store(ptr %ptr) { +; CHECK-LABEL: define void @live_unstrided_store( +; CHECK-SAME: ptr [[PTR:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[PTR]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[PTR]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[PTR]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: ret void +; +entry: + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr %ptr, i32 4, i1 false, i32 4, i32 2) + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %ptr, i32 4, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l, ptr %ptr, i32 4, i1 false, i32 4, i32 2) + ret void +} + +define void @dead_non_matrix_store(ptr noalias %src, ptr noalias %dst) { +; CHECK-LABEL: define void @dead_non_matrix_store( +; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[DST_OFFSET:%.*]] = getelementptr inbounds double, ptr [[SRC]], i32 6 +; CHECK-NEXT: store double 4.200000e+01, ptr [[DST_OFFSET]], align 8 +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[DST]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: ret void +; +entry: + %dst.offset = getelementptr inbounds double, ptr %src, i32 6 + store double 42.0, ptr %dst.offset + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 4, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l, ptr %dst, i32 4, i1 false, i32 4, i32 2) + ret void +} + +define void @live_non_matrix_store(ptr %ptr) { +; CHECK-LABEL: define void @live_non_matrix_store( +; CHECK-SAME: ptr [[PTR:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR_OFFSET:%.*]] = getelementptr inbounds double, ptr [[PTR]], i32 6 +; CHECK-NEXT: store double 4.200000e+01, ptr [[PTR_OFFSET]], align 8 +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[PTR]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[PTR]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: ret void +; +entry: + %ptr.offset = getelementptr inbounds double, ptr %ptr, i32 6 + store double 42.0, ptr 
%ptr.offset + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %ptr, i32 4, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l, ptr %ptr, i32 4, i1 false, i32 4, i32 2) + ret void +} + +define void @dead_matrix_store_non_matrix_overwrite_unstrided(ptr noalias %src, ptr noalias %dst) { +; CHECK-LABEL: define void @dead_matrix_store_non_matrix_overwrite_unstrided( +; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: store <8 x double> zeroinitializer, ptr [[DST]], align 64 +; CHECK-NEXT: ret void +; +entry: + call void @llvm.matrix.column.major.store(<8 x double> zeroinitializer, ptr %dst, i32 4, i1 false, i32 4, i32 2) + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 4, i1 false, i32 4, i32 2) + store <8 x double> zeroinitializer, ptr %dst + ret void +} + +define void @dead_matrix_store_non_matrix_overwrite_strided(ptr noalias %src, ptr noalias %dst) { +; CHECK-LABEL: define void @dead_matrix_store_non_matrix_overwrite_strided( +; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 8, i1 false, i32 4, i32 2) +; CHECK-NEXT: store <16 x double> zeroinitializer, ptr [[DST]], align 128 +; CHECK-NEXT: ret void +; +entry: + call void @llvm.matrix.column.major.store(<8 x double> zeroinitializer, ptr %dst, i32 4, i1 false, i32 4, i32 2) + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 8, i1 false, i32 4, i32 2) + store <16 x double> zeroinitializer, ptr %dst + ret void +} + +define void @live_matrix_store_non_matrix_overwrite_unstrided(ptr noalias %src, ptr noalias %dst) { +; CHECK-LABEL: define void @live_matrix_store_non_matrix_overwrite_unstrided( +; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[DST]], align 32 +; CHECK-NEXT: ret void +; +entry: + call void @llvm.matrix.column.major.store(<8 x double> zeroinitializer, ptr %dst, i32 4, i1 false, i32 4, i32 2) + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 4, i1 false, i32 4, i32 2) + store <4 x double> zeroinitializer, ptr %dst + ret void +} + +define void @live_matrix_store_non_matrix_overwrite_strided(ptr noalias %src, ptr noalias %dst) { +; CHECK-LABEL: define void @live_matrix_store_non_matrix_overwrite_strided( +; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L:%.*]] = call <8 x double> 
@llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 8, i1 false, i32 4, i32 2) +; CHECK-NEXT: store <8 x double> zeroinitializer, ptr [[DST]], align 64 +; CHECK-NEXT: ret void +; +entry: + call void @llvm.matrix.column.major.store(<8 x double> zeroinitializer, ptr %dst, i32 4, i1 false, i32 4, i32 2) + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 8, i1 false, i32 4, i32 2) + store <8 x double> zeroinitializer, ptr %dst + ret void +} + +define void @dead_matrix_store_dimension_change(ptr noalias %src, ptr noalias %dst) { +; CHECK-LABEL: define void @dead_matrix_store_dimension_change( +; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 8, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[DST]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v9f64.i32(<9 x double> zeroinitializer, ptr [[DST]], i32 3, i1 false, i32 3, i32 3) +; CHECK-NEXT: ret void +; +entry: + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 8, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l, ptr %dst, i32 4, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v9f64.i32(<9 x double> zeroinitializer, ptr %dst, i32 3, i1 false, i32 3, i32 3) + ret void +} + +define void @live_matrix_store_dimension_change(ptr noalias %src, ptr noalias %dst) { +; CHECK-LABEL: define void @live_matrix_store_dimension_change( +; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 8, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v9f64.i32(<9 x double> zeroinitializer, ptr [[DST]], i32 3, i1 false, i32 3, i32 3) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[DST]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: ret void +; +entry: + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 8, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v9f64.i32(<9 x double> zeroinitializer, ptr %dst, i32 3, i1 false, i32 3, i32 3) + call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> %l, ptr %dst, i32 4, i1 false, i32 4, i32 2) + ret void +} + +declare <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr, i32, i1, i32, i32) +declare <9 x double> @llvm.matrix.column.major.load.v9f64.i32(ptr, i32, i1, i32, i32) +declare void @llvm.matrix.column.major.store.v8f64.i32(<8 x double>, ptr, i32, i1, i32, i32) diff --git a/llvm/test/Transforms/GVN/matrix-intrinsics.ll b/llvm/test/Transforms/GVN/matrix-intrinsics.ll new file mode 100644 index 0000000..78dbfe1 --- /dev/null +++ b/llvm/test/Transforms/GVN/matrix-intrinsics.ll @@ -0,0 +1,136 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -passes=gvn -S %s | FileCheck %s + +define void @redundant_unstrided_load(ptr %src) { +; CHECK-LABEL: define void @redundant_unstrided_load( +; CHECK-SAME: ptr [[SRC:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SRC_OFFSET:%.*]] = getelementptr inbounds double, ptr [[SRC]], i32 8 +; CHECK-NEXT: [[L:%.*]] = call <8 x double> 
@llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[SRC]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L_2:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @use(<8 x double> [[L]]) +; CHECK-NEXT: call void @use(<8 x double> [[L_2]]) +; CHECK-NEXT: ret void +; +entry: + %src.offset = getelementptr inbounds double, ptr %src, i32 8 + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 4, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v8f64(<8 x double> %l, ptr %src, i32 4, i1 false, i32 4, i32 2) + %l.2 = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 4, i1 false, i32 4, i32 2) + call void @use(<8 x double> %l) + call void @use(<8 x double> %l.2) + ret void +} + +define void @redundant_unstrided_load_non_matrix_store(ptr %src) { +; CHECK-LABEL: define void @redundant_unstrided_load_non_matrix_store( +; CHECK-SAME: ptr [[SRC:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SRC_OFFSET:%.*]] = getelementptr inbounds double, ptr [[SRC]], i32 1 +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: store double 4.200000e+01, ptr [[SRC]], align 8 +; CHECK-NEXT: [[L_2:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @use(<8 x double> [[L]]) +; CHECK-NEXT: call void @use(<8 x double> [[L_2]]) +; CHECK-NEXT: ret void +; +entry: + %src.offset = getelementptr inbounds double, ptr %src, i32 1 + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 4, i1 false, i32 4, i32 2) + store double 42.0, ptr %src + %l.2 = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 4, i1 false, i32 4, i32 2) + call void @use(<8 x double> %l) + call void @use(<8 x double> %l.2) + ret void +} + +define void @redundant_strided_load(ptr %src) { +; CHECK-LABEL: define void @redundant_strided_load( +; CHECK-SAME: ptr [[SRC:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SRC_OFFSET:%.*]] = getelementptr inbounds double, ptr [[SRC]], i32 16 +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 8, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[SRC]], i32 8, i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L_2:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 8, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @use(<8 x double> [[L]]) +; CHECK-NEXT: call void @use(<8 x double> [[L_2]]) +; CHECK-NEXT: ret void +; +entry: + %src.offset = getelementptr inbounds double, ptr %src, i32 16 + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 8, i1 false, i32 4, i32 2) + call void @llvm.matrix.column.major.store.v8f64(<8 x double> %l, ptr %src, i32 8, i1 false, i32 4, i32 2) + %l.2 = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 8, i1 false, i32 4, i32 2) + call void @use(<8 x double> %l) + call void @use(<8 x double> %l.2) + ret void + +} + +define void @redundant_strided_load_non_matrix_store(ptr %src) { +; CHECK-LABEL: define void 
@redundant_strided_load_non_matrix_store( +; CHECK-SAME: ptr [[SRC:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SRC_OFFSET:%.*]] = getelementptr inbounds double, ptr [[SRC]], i32 16 +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 8, i1 false, i32 4, i32 2) +; CHECK-NEXT: store double 4.200000e+01, ptr [[SRC]], align 8 +; CHECK-NEXT: [[L_2:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 8, i1 false, i32 4, i32 2) +; CHECK-NEXT: call void @use(<8 x double> [[L]]) +; CHECK-NEXT: call void @use(<8 x double> [[L_2]]) +; CHECK-NEXT: ret void +; +entry: + %src.offset = getelementptr inbounds double, ptr %src, i32 16 + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 8, i1 false, i32 4, i32 2) + store double 42.0, ptr %src + %l.2 = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src.offset, i32 8, i1 false, i32 4, i32 2) + call void @use(<8 x double> %l) + call void @use(<8 x double> %l.2) + ret void +} + +define void @repeat_load_dimension_change_project(ptr %src) { +; CHECK-LABEL: define void @repeat_load_dimension_change_project( +; CHECK-SAME: ptr [[SRC:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L_2:%.*]] = call <9 x double> @llvm.matrix.column.major.load.v9f64.i32(ptr [[SRC]], i32 3, i1 false, i32 3, i32 3) +; CHECK-NEXT: [[L_3:%.*]] = shufflevector <9 x double> [[L_2]], <9 x double> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> +; CHECK-NEXT: call void @use(<8 x double> [[L]]) +; CHECK-NEXT: call void @use(<8 x double> [[L_3]]) +; CHECK-NEXT: ret void +; +entry: + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 4, i1 false, i32 4, i32 2) + %l.2 = call <9 x double> @llvm.matrix.column.major.load.v9f64.i32(ptr %src, i32 3, i1 false, i32 3, i32 3) + %l.3 = shufflevector <9 x double> %l.2, <9 x double> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + call void @use(<8 x double> %l) + call void @use(<8 x double> %l.3) + ret void +} + +define void @repeat_load_dimension_change_shuffle(ptr %src) { +; CHECK-LABEL: define void @repeat_load_dimension_change_shuffle( +; CHECK-SAME: ptr [[SRC:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 4, i1 false, i32 4, i32 2) +; CHECK-NEXT: [[L_2:%.*]] = call <9 x double> @llvm.matrix.column.major.load.v9f64.i32(ptr [[SRC]], i32 3, i1 false, i32 3, i32 3) +; CHECK-NEXT: [[L_3:%.*]] = shufflevector <9 x double> [[L_2]], <9 x double> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> +; CHECK-NEXT: call void @use(<8 x double> [[L]]) +; CHECK-NEXT: call void @use(<8 x double> [[L_3]]) +; CHECK-NEXT: ret void +; +entry: + %l = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %src, i32 4, i1 false, i32 4, i32 2) + %l.2 = call <9 x double> @llvm.matrix.column.major.load.v9f64.i32(ptr %src, i32 3, i1 false, i32 3, i32 3) + %l.3 = shufflevector <9 x double> %l.2, <9 x double> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + call void @use(<8 x double> %l) + call void @use(<8 x double> %l.3) + ret void +} + +declare <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr, i32, 
i1, i32, i32) +declare <9 x double> @llvm.matrix.column.major.load.v9f64.i32(ptr, i32, i1, i32, i32) +declare void @llvm.matrix.column.major.store.v8f64.i32(<8 x double>, ptr, i32, i1, i32, i32) +declare void @use(<8 x double>) diff --git a/llvm/test/Transforms/IROutliner/outlining-compatible-and-attribute-transfer.ll b/llvm/test/Transforms/IROutliner/outlining-compatible-and-attribute-transfer.ll index b3f2e81..15ce3e3 100644 --- a/llvm/test/Transforms/IROutliner/outlining-compatible-and-attribute-transfer.ll +++ b/llvm/test/Transforms/IROutliner/outlining-compatible-and-attribute-transfer.ll @@ -5,7 +5,7 @@ ; attributes that should be transferred only if it is on all of the regions. ; This includes the attributes, no-nans-fp-math, -; no-signed-zeros-fp-math, less-precise-fpmad, unsafe-fp-math, and +; no-signed-zeros-fp-math, less-precise-fpmad, and ; no-infs-fp-math. Only when each instance of similarity has these attributes ; can we say that the outlined function can have these attributes since that ; is the more general case for these attributes. @@ -101,7 +101,7 @@ entry: } attributes #0 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "less-precise-fpmad"="true" -"unsafe-fp-math"="true" "no-infs-fp-math"="true"} +"no-infs-fp-math"="true"} ; CHECK: define internal void @outlined_ir_func_0(ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]]) [[ATTR1:#[0-9]+]] { ; CHECK: entry_to_outline: @@ -122,5 +122,5 @@ attributes #0 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "les ; CHECK-NEXT: [[CL:%.*]] = load i32, ptr [[ARG2]], align 4 -; CHECK: attributes [[ATTR1]] = { minsize optsize "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "unsafe-fp-math"="false" } -; CHECK: attributes [[ATTR]] = { minsize optsize "less-precise-fpmad"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "unsafe-fp-math"="true" } +; CHECK: attributes [[ATTR1]] = { minsize optsize "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" } +; CHECK: attributes [[ATTR]] = { minsize optsize "less-precise-fpmad"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" } diff --git a/llvm/test/Transforms/Inline/attributes.ll b/llvm/test/Transforms/Inline/attributes.ll index 55ab430..da7eeda 100644 --- a/llvm/test/Transforms/Inline/attributes.ll +++ b/llvm/test/Transforms/Inline/attributes.ll @@ -601,46 +601,6 @@ define i32 @test_no-signed-zeros-fp-math3(i32 %i) "no-signed-zeros-fp-math"="tru ; CHECK-NEXT: ret i32 } -define i32 @unsafe-fp-math_callee0(i32 %i) "unsafe-fp-math"="false" { - ret i32 %i -; CHECK: @unsafe-fp-math_callee0(i32 %i) [[UNSAFE_FPMATH_FALSE:#[0-9]+]] { -; CHECK-NEXT: ret i32 -} - -define i32 @unsafe-fp-math_callee1(i32 %i) "unsafe-fp-math"="true" { - ret i32 %i -; CHECK: @unsafe-fp-math_callee1(i32 %i) [[UNSAFE_FPMATH_TRUE:#[0-9]+]] { -; CHECK-NEXT: ret i32 -} - -define i32 @test_unsafe-fp-math0(i32 %i) "unsafe-fp-math"="false" { - %1 = call i32 @unsafe-fp-math_callee0(i32 %i) - ret i32 %1 -; CHECK: @test_unsafe-fp-math0(i32 %i) [[UNSAFE_FPMATH_FALSE]] { -; CHECK-NEXT: ret i32 -} - -define i32 @test_unsafe-fp-math1(i32 %i) "unsafe-fp-math"="false" { - %1 = call i32 @unsafe-fp-math_callee1(i32 %i) - ret i32 %1 -; CHECK: @test_unsafe-fp-math1(i32 %i) [[UNSAFE_FPMATH_FALSE]] { -; CHECK-NEXT: ret i32 -} - -define i32 @test_unsafe-fp-math2(i32 %i) "unsafe-fp-math"="true" { - %1 = call 
i32 @unsafe-fp-math_callee0(i32 %i) - ret i32 %1 -; CHECK: @test_unsafe-fp-math2(i32 %i) [[UNSAFE_FPMATH_FALSE]] { -; CHECK-NEXT: ret i32 -} - -define i32 @test_unsafe-fp-math3(i32 %i) "unsafe-fp-math"="true" { - %1 = call i32 @unsafe-fp-math_callee1(i32 %i) - ret i32 %1 -; CHECK: @test_unsafe-fp-math3(i32 %i) [[UNSAFE_FPMATH_TRUE]] { -; CHECK-NEXT: ret i32 -} - ; Test that fn_ret_thunk_extern has no CompatRule; inlining is permitted. ; Test that fn_ret_thunk_extern has no MergeRule; fn_ret_thunk_extern is not ; propagated or dropped on the caller after inlining. @@ -693,6 +653,4 @@ define i32 @loader_replaceable_caller() { ; CHECK: attributes [[NO_NANS_FPMATH_TRUE]] = { "no-nans-fp-math"="true" } ; CHECK: attributes [[NO_SIGNED_ZEROS_FPMATH_FALSE]] = { "no-signed-zeros-fp-math"="false" } ; CHECK: attributes [[NO_SIGNED_ZEROS_FPMATH_TRUE]] = { "no-signed-zeros-fp-math"="true" } -; CHECK: attributes [[UNSAFE_FPMATH_FALSE]] = { "unsafe-fp-math"="false" } -; CHECK: attributes [[UNSAFE_FPMATH_TRUE]] = { "unsafe-fp-math"="true" } ; CHECK: attributes [[FNRETTHUNK_EXTERN]] = { fn_ret_thunk_extern } diff --git a/llvm/test/Transforms/InstCombine/select_with_identical_phi.ll b/llvm/test/Transforms/InstCombine/select_with_identical_phi.ll deleted file mode 100644 index 7816781..0000000 --- a/llvm/test/Transforms/InstCombine/select_with_identical_phi.ll +++ /dev/null @@ -1,243 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -S -passes=instcombine | FileCheck %s -@A = extern_weak global float, align 4 - -; %same.as.v1 is a select with two phis %v1 and %phi.to.remove as the true -; and false values, while %v1 and %phi.to.remove are actually the same. -; Fold the selection instruction %same.as.v1 to %v1. -define void @select_with_identical_phi(ptr %m, ptr %n, i32 %count) { -; CHECK-LABEL: @select_with_identical_phi( -; CHECK-NEXT: entry: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[V0:%.*]] = phi float [ 0x4415AF1D80000000, [[ENTRY:%.*]] ], [ [[V0_1:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[V1:%.*]] = phi float [ 0xC415AF1D80000000, [[ENTRY]] ], [ [[V1_1:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC_I:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[Q:%.*]] = phi ptr [ [[M:%.*]], [[ENTRY]] ], [ [[Q_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[C:%.*]] = phi ptr [ [[N:%.*]], [[ENTRY]] ], [ [[C_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[Q_LOAD:%.*]] = load float, ptr [[Q]], align 4 -; CHECK-NEXT: [[C_LOAD:%.*]] = load float, ptr [[C]], align 4 -; CHECK-NEXT: [[SUB:%.*]] = fsub float [[Q_LOAD]], [[C_LOAD]] -; CHECK-NEXT: [[CMP1:%.*]] = fcmp olt float [[SUB]], [[V0]] -; CHECK-NEXT: [[V0_1]] = select i1 [[CMP1]], float [[SUB]], float [[V0]] -; CHECK-NEXT: [[CMP2:%.*]] = fcmp ogt float [[SUB]], [[V1]] -; CHECK-NEXT: [[V1_1]] = select i1 [[CMP2]], float [[SUB]], float [[V1]] -; CHECK-NEXT: [[INC_I]] = add nuw nsw i32 [[I]], 1 -; CHECK-NEXT: [[Q_NEXT]] = getelementptr inbounds nuw i8, ptr [[Q]], i64 4 -; CHECK-NEXT: [[C_NEXT]] = getelementptr inbounds nuw i8, ptr [[C]], i64 4 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC_I]], [[COUNT:%.*]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[FOR_BODY]] -; CHECK: exit: -; CHECK-NEXT: store float [[V1_1]], ptr @A, align 4 -; CHECK-NEXT: ret void -; -entry: - br label %for.body - -for.body: ; preds = %entry, %for.body - %v0 = phi float [ 0x4415AF1D80000000, %entry ], [ %v0.1, %for.body ] - %v1 = phi float [ 0xC415AF1D80000000, %entry ], [ 
%v1.1, %for.body ] - %phi.to.remove = phi float [ 0xC415AF1D80000000, %entry ], [ %phi.to.remove.next, %for.body ] - %i = phi i32 [ 0, %entry ], [ %inc.i, %for.body ] - %q = phi ptr [ %m, %entry ], [ %q.next, %for.body ] - %c = phi ptr [ %n, %entry ], [ %c.next, %for.body ] - %q.load = load float, ptr %q - %c.load = load float, ptr %c - %sub = fsub float %q.load, %c.load - %cmp1 = fcmp olt float %sub, %v0 - %v0.1 = select i1 %cmp1, float %sub, float %v0 - %same.as.v1 = select i1 %cmp1, float %v1, float %phi.to.remove - %cmp2 = fcmp ogt float %sub, %same.as.v1 - %v1.1 = select i1 %cmp2, float %sub, float %v1 - %phi.to.remove.next = select i1 %cmp2, float %sub, float %same.as.v1 - %inc.i = add nuw nsw i32 %i, 1 - %q.next = getelementptr inbounds i8, ptr %q, i64 4 - %c.next = getelementptr inbounds i8, ptr %c, i64 4 - %exitcond = icmp eq i32 %inc.i, %count - br i1 %exitcond, label %exit, label %for.body - -exit: - %vl.1.lcssa = phi float [ %v1.1, %for.body ] - store float %vl.1.lcssa, ptr @A - ret void -} - -; The difference from select_with_identical_phi() is that the true and false values in -; %phi.to.remove.next and %v1.1 are swapped. -; Check that %same.as.v1 can be folded. -define void @select_with_identical_phi_2(ptr %m, ptr %n, i32 %count) { -; CHECK-LABEL: @select_with_identical_phi_2( -; CHECK-NEXT: entry: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[V0:%.*]] = phi float [ 0x4415AF1D80000000, [[ENTRY:%.*]] ], [ [[V0_1:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[V1:%.*]] = phi float [ 0xC415AF1D80000000, [[ENTRY]] ], [ [[V1_1:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC_I:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[Q:%.*]] = phi ptr [ [[M:%.*]], [[ENTRY]] ], [ [[Q_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[C:%.*]] = phi ptr [ [[N:%.*]], [[ENTRY]] ], [ [[C_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[Q_LOAD:%.*]] = load float, ptr [[Q]], align 4 -; CHECK-NEXT: [[C_LOAD:%.*]] = load float, ptr [[C]], align 4 -; CHECK-NEXT: [[SUB:%.*]] = fsub float [[Q_LOAD]], [[C_LOAD]] -; CHECK-NEXT: [[CMP1:%.*]] = fcmp olt float [[SUB]], [[V0]] -; CHECK-NEXT: [[V0_1]] = select i1 [[CMP1]], float [[SUB]], float [[V0]] -; CHECK-NEXT: [[CMP2:%.*]] = fcmp ogt float [[SUB]], [[V1]] -; CHECK-NEXT: [[V1_1]] = select i1 [[CMP2]], float [[V1]], float [[SUB]] -; CHECK-NEXT: [[INC_I]] = add nuw nsw i32 [[I]], 1 -; CHECK-NEXT: [[Q_NEXT]] = getelementptr inbounds nuw i8, ptr [[Q]], i64 4 -; CHECK-NEXT: [[C_NEXT]] = getelementptr inbounds nuw i8, ptr [[C]], i64 4 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC_I]], [[COUNT:%.*]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[FOR_BODY]] -; CHECK: exit: -; CHECK-NEXT: store float [[V1_1]], ptr @A, align 4 -; CHECK-NEXT: ret void -; -entry: - br label %for.body - -for.body: ; preds = %entry, %for.body - %v0 = phi float [ 0x4415AF1D80000000, %entry ], [ %v0.1, %for.body ] - %v1 = phi float [ 0xC415AF1D80000000, %entry ], [ %v1.1, %for.body ] - %phi.to.remove = phi float [ 0xC415AF1D80000000, %entry ], [ %phi.to.remove.next, %for.body ] - %i = phi i32 [ 0, %entry ], [ %inc.i, %for.body ] - %q = phi ptr [ %m, %entry ], [ %q.next, %for.body ] - %c = phi ptr [ %n, %entry ], [ %c.next, %for.body ] - %q.load = load float, ptr %q - %c.load = load float, ptr %c - %sub = fsub float %q.load, %c.load - %cmp1 = fcmp olt float %sub, %v0 - %v0.1 = select i1 %cmp1, float %sub, float %v0 - %same.as.v1 = select i1 %cmp1, float %v1, float %phi.to.remove - %cmp2 = fcmp ogt float %sub, %same.as.v1 - %v1.1 = 
select i1 %cmp2, float %v1, float %sub - %phi.to.remove.next = select i1 %cmp2, float %same.as.v1, float %sub - %inc.i = add nuw nsw i32 %i, 1 - %q.next = getelementptr inbounds i8, ptr %q, i64 4 - %c.next = getelementptr inbounds i8, ptr %c, i64 4 - %exitcond = icmp eq i32 %inc.i, %count - br i1 %exitcond, label %exit, label %for.body - -exit: - %vl.1.lcssa = phi float [ %v1.1, %for.body ] - store float %vl.1.lcssa, ptr @A - ret void -} - -; The difference from select_with_identical_phi() is that the true and false values in -; same.as.v1 are swapped. -; Check that %same.as.v1 can be folded. -define void @select_with_identical_phi_3(ptr %m, ptr %n, i32 %count) { -; CHECK-LABEL: @select_with_identical_phi_3( -; CHECK-NEXT: entry: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[V0:%.*]] = phi float [ 0x4415AF1D80000000, [[ENTRY:%.*]] ], [ [[V0_1:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[V1:%.*]] = phi float [ 0xC415AF1D80000000, [[ENTRY]] ], [ [[V1_1:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC_I:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[Q:%.*]] = phi ptr [ [[M:%.*]], [[ENTRY]] ], [ [[Q_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[C:%.*]] = phi ptr [ [[N:%.*]], [[ENTRY]] ], [ [[C_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[Q_LOAD:%.*]] = load float, ptr [[Q]], align 4 -; CHECK-NEXT: [[C_LOAD:%.*]] = load float, ptr [[C]], align 4 -; CHECK-NEXT: [[SUB:%.*]] = fsub float [[Q_LOAD]], [[C_LOAD]] -; CHECK-NEXT: [[CMP1:%.*]] = fcmp olt float [[SUB]], [[V0]] -; CHECK-NEXT: [[V0_1]] = select i1 [[CMP1]], float [[SUB]], float [[V0]] -; CHECK-NEXT: [[CMP2:%.*]] = fcmp ogt float [[SUB]], [[V1]] -; CHECK-NEXT: [[V1_1]] = select i1 [[CMP2]], float [[SUB]], float [[V1]] -; CHECK-NEXT: [[INC_I]] = add nuw nsw i32 [[I]], 1 -; CHECK-NEXT: [[Q_NEXT]] = getelementptr inbounds nuw i8, ptr [[Q]], i64 4 -; CHECK-NEXT: [[C_NEXT]] = getelementptr inbounds nuw i8, ptr [[C]], i64 4 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC_I]], [[COUNT:%.*]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[FOR_BODY]] -; CHECK: exit: -; CHECK-NEXT: store float [[V1_1]], ptr @A, align 4 -; CHECK-NEXT: ret void -; -entry: - br label %for.body - -for.body: ; preds = %entry, %for.body - %v0 = phi float [ 0x4415AF1D80000000, %entry ], [ %v0.1, %for.body ] - %v1 = phi float [ 0xC415AF1D80000000, %entry ], [ %v1.1, %for.body ] - %phi.to.remove = phi float [ 0xC415AF1D80000000, %entry ], [ %phi.to.remove.next, %for.body ] - %i = phi i32 [ 0, %entry ], [ %inc.i, %for.body ] - %q = phi ptr [ %m, %entry ], [ %q.next, %for.body ] - %c = phi ptr [ %n, %entry ], [ %c.next, %for.body ] - %q.load = load float, ptr %q - %c.load = load float, ptr %c - %sub = fsub float %q.load, %c.load - %cmp1 = fcmp olt float %sub, %v0 - %v0.1 = select i1 %cmp1, float %sub, float %v0 - %same.as.v1 = select i1 %cmp1, float %phi.to.remove, float %v1 - %cmp2 = fcmp ogt float %sub, %same.as.v1 - %v1.1 = select i1 %cmp2, float %sub, float %v1 - %phi.to.remove.next = select i1 %cmp2, float %sub, float %same.as.v1 - %inc.i = add nuw nsw i32 %i, 1 - %q.next = getelementptr inbounds i8, ptr %q, i64 4 - %c.next = getelementptr inbounds i8, ptr %c, i64 4 - %exitcond = icmp eq i32 %inc.i, %count - br i1 %exitcond, label %exit, label %for.body - -exit: - %vl.1.lcssa = phi float [ %v1.1, %for.body ] - store float %vl.1.lcssa, ptr @A - ret void -} - -; The difference from select_with_identical_phi() is that the true and false values in -; %same.as.v1, %phi.to.remove.next and %v1.1 are swapped. 
-; Check that %same.as.v1 can be folded. -define void @select_with_identical_phi_4(ptr %m, ptr %n, i32 %count) { -; CHECK-LABEL: @select_with_identical_phi_4( -; CHECK-NEXT: entry: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[V0:%.*]] = phi float [ 0x4415AF1D80000000, [[ENTRY:%.*]] ], [ [[V0_1:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[V1:%.*]] = phi float [ 0xC415AF1D80000000, [[ENTRY]] ], [ [[V1_1:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC_I:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[Q:%.*]] = phi ptr [ [[M:%.*]], [[ENTRY]] ], [ [[Q_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[C:%.*]] = phi ptr [ [[N:%.*]], [[ENTRY]] ], [ [[C_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[Q_LOAD:%.*]] = load float, ptr [[Q]], align 4 -; CHECK-NEXT: [[C_LOAD:%.*]] = load float, ptr [[C]], align 4 -; CHECK-NEXT: [[SUB:%.*]] = fsub float [[Q_LOAD]], [[C_LOAD]] -; CHECK-NEXT: [[CMP1:%.*]] = fcmp olt float [[SUB]], [[V0]] -; CHECK-NEXT: [[V0_1]] = select i1 [[CMP1]], float [[SUB]], float [[V0]] -; CHECK-NEXT: [[CMP2:%.*]] = fcmp ogt float [[SUB]], [[V1]] -; CHECK-NEXT: [[V1_1]] = select i1 [[CMP2]], float [[V1]], float [[SUB]] -; CHECK-NEXT: [[INC_I]] = add nuw nsw i32 [[I]], 1 -; CHECK-NEXT: [[Q_NEXT]] = getelementptr inbounds nuw i8, ptr [[Q]], i64 4 -; CHECK-NEXT: [[C_NEXT]] = getelementptr inbounds nuw i8, ptr [[C]], i64 4 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC_I]], [[COUNT:%.*]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[FOR_BODY]] -; CHECK: exit: -; CHECK-NEXT: store float [[V1_1]], ptr @A, align 4 -; CHECK-NEXT: ret void -; -entry: - br label %for.body - -for.body: ; preds = %entry, %for.body - %v0 = phi float [ 0x4415AF1D80000000, %entry ], [ %v0.1, %for.body ] - %v1 = phi float [ 0xC415AF1D80000000, %entry ], [ %v1.1, %for.body ] - %phi.to.remove = phi float [ 0xC415AF1D80000000, %entry ], [ %phi.to.remove.next, %for.body ] - %i = phi i32 [ 0, %entry ], [ %inc.i, %for.body ] - %q = phi ptr [ %m, %entry ], [ %q.next, %for.body ] - %c = phi ptr [ %n, %entry ], [ %c.next, %for.body ] - %q.load = load float, ptr %q - %c.load = load float, ptr %c - %sub = fsub float %q.load, %c.load - %cmp1 = fcmp olt float %sub, %v0 - %v0.1 = select i1 %cmp1, float %sub, float %v0 - %same.as.v1 = select i1 %cmp1, float %phi.to.remove, float %v1 - %cmp2 = fcmp ogt float %sub, %same.as.v1 - %v1.1 = select i1 %cmp2, float %v1, float %sub - %phi.to.remove.next = select i1 %cmp2, float %same.as.v1, float %sub - %inc.i = add nuw nsw i32 %i, 1 - %q.next = getelementptr inbounds i8, ptr %q, i64 4 - %c.next = getelementptr inbounds i8, ptr %c, i64 4 - %exitcond = icmp eq i32 %inc.i, %count - br i1 %exitcond, label %exit, label %for.body - -exit: - %vl.1.lcssa = phi float [ %v1.1, %for.body ] - store float %vl.1.lcssa, ptr @A - ret void -} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll index 829acbbf..305a692 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll @@ -210,3 +210,175 @@ loop: exit: ret void } + +define void @test_masked_interleave_group(i32 %N, ptr %mask, ptr %src, ptr %dst) { +; IC1-LABEL: define void @test_masked_interleave_group( +; IC1-SAME: i32 [[N:%.*]], ptr [[MASK:%.*]], ptr [[SRC:%.*]], ptr 
[[DST:%.*]]) #[[ATTR0]] { +; IC1-NEXT: [[ENTRY:.*:]] +; IC1-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 +; IC1-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1 +; IC1-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; IC1-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2 +; IC1-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP3]], i64 8) +; IC1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], [[UMAX]] +; IC1-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; IC1: [[VECTOR_MEMCHECK]]: +; IC1-NEXT: [[TMP4:%.*]] = zext i32 [[N]] to i64 +; IC1-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4 +; IC1-NEXT: [[TMP6:%.*]] = add nuw nsw i64 [[TMP5]], 16 +; IC1-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP6]] +; IC1-NEXT: [[TMP7:%.*]] = add nuw nsw i64 [[TMP4]], 1 +; IC1-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[TMP7]] +; IC1-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP6]] +; IC1-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP1]] +; IC1-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[MASK]], [[SCEVGEP]] +; IC1-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] +; IC1-NEXT: [[BOUND03:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP2]] +; IC1-NEXT: [[BOUND14:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]] +; IC1-NEXT: [[FOUND_CONFLICT5:%.*]] = and i1 [[BOUND03]], [[BOUND14]] +; IC1-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]] +; IC1-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; IC1: [[VECTOR_PH]]: +; IC1-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; IC1-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4 +; IC1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP9]] +; IC1-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] +; IC1-NEXT: [[TMP10:%.*]] = trunc i64 [[N_VEC]] to i32 +; IC1-NEXT: [[TMP11:%.*]] = mul i64 [[N_VEC]], 16 +; IC1-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP11]] +; IC1-NEXT: [[TMP13:%.*]] = mul i64 [[N_VEC]], 16 +; IC1-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP13]] +; IC1-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[N_VEC]] +; IC1-NEXT: br label %[[VECTOR_BODY:.*]] +; IC1: [[VECTOR_BODY]]: +; IC1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; IC1-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 16 +; IC1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX]] +; IC1-NEXT: [[OFFSET_IDX6:%.*]] = mul i64 [[INDEX]], 16 +; IC1-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX6]] +; IC1-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[INDEX]] +; IC1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[NEXT_GEP8]], align 1, !alias.scope [[META6:![0-9]+]] +; IC1-NEXT: [[TMP16:%.*]] = icmp eq <vscale x 4 x i8> [[WIDE_LOAD]], zeroinitializer +; IC1-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 16 x i1> @llvm.vector.interleave4.nxv16i1(<vscale x 4 x i1> [[TMP16]], <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i1> [[TMP16]]) +; IC1-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 16 x float> @llvm.masked.load.nxv16f32.p0(ptr align 4 [[NEXT_GEP7]], <vscale x 16 x i1> [[INTERLEAVED_MASK]], <vscale x 16 x float> poison), !alias.scope [[META9:![0-9]+]] +; IC1-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.vector.deinterleave4.nxv16f32(<vscale x 16 x float> 
[[WIDE_MASKED_VEC]]) +; IC1-NEXT: [[TMP17:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 0 +; IC1-NEXT: [[TMP18:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 1 +; IC1-NEXT: [[TMP19:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 2 +; IC1-NEXT: [[TMP20:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 3 +; IC1-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 16 x float> @llvm.vector.interleave4.nxv16f32(<vscale x 4 x float> [[TMP17]], <vscale x 4 x float> [[TMP18]], <vscale x 4 x float> [[TMP19]], <vscale x 4 x float> [[TMP20]]) +; IC1-NEXT: [[INTERLEAVED_MASK9:%.*]] = call <vscale x 16 x i1> @llvm.vector.interleave4.nxv16i1(<vscale x 4 x i1> [[TMP16]], <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i1> [[TMP16]]) +; IC1-NEXT: call void @llvm.masked.store.nxv16f32.p0(<vscale x 16 x float> [[INTERLEAVED_VEC]], ptr align 4 [[NEXT_GEP]], <vscale x 16 x i1> [[INTERLEAVED_MASK9]]), !alias.scope [[META11:![0-9]+]], !noalias [[META13:![0-9]+]] +; IC1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] +; IC1-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; IC1-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; IC1: [[MIDDLE_BLOCK]]: +; IC1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]] +; IC1-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] +; IC1: [[SCALAR_PH]]: +; +; CHECK-LABEL: define void @test_masked_interleave_group( +; CHECK-SAME: i32 [[N:%.*]], ptr [[MASK:%.*]], ptr [[SRC:%.*]], ptr [[DST:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1 +; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2 +; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP3]], i64 8) +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], [[UMAX]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; CHECK: [[VECTOR_MEMCHECK]]: +; CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[N]] to i64 +; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4 +; CHECK-NEXT: [[TMP6:%.*]] = add nuw nsw i64 [[TMP5]], 16 +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP7:%.*]] = add nuw nsw i64 [[TMP4]], 1 +; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP6]] +; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP1]] +; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[MASK]], [[SCEVGEP]] +; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] +; CHECK-NEXT: [[BOUND03:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP2]] +; CHECK-NEXT: [[BOUND14:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]] +; CHECK-NEXT: [[FOUND_CONFLICT5:%.*]] = and i1 [[BOUND03]], [[BOUND14]] +; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]] +; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: 
[[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], [[TMP9]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[N_VEC]] to i32 +; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[N_VEC]], 16 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[N_VEC]], 16 +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[N_VEC]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 16 +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[OFFSET_IDX6:%.*]] = mul i64 [[INDEX]], 16 +; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX6]] +; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[NEXT_GEP8]], align 1, !alias.scope [[META6:![0-9]+]] +; CHECK-NEXT: [[TMP16:%.*]] = icmp eq <vscale x 4 x i8> [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 16 x i1> @llvm.vector.interleave4.nxv16i1(<vscale x 4 x i1> [[TMP16]], <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i1> [[TMP16]]) +; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 16 x float> @llvm.masked.load.nxv16f32.p0(ptr align 4 [[NEXT_GEP7]], <vscale x 16 x i1> [[INTERLEAVED_MASK]], <vscale x 16 x float> poison), !alias.scope [[META9:![0-9]+]] +; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.vector.deinterleave4.nxv16f32(<vscale x 16 x float> [[WIDE_MASKED_VEC]]) +; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 0 +; CHECK-NEXT: [[TMP18:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 1 +; CHECK-NEXT: [[TMP19:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 2 +; CHECK-NEXT: [[TMP20:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[STRIDED_VEC]], 3 +; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 16 x float> @llvm.vector.interleave4.nxv16f32(<vscale x 4 x float> [[TMP17]], <vscale x 4 x float> [[TMP18]], <vscale x 4 x float> [[TMP19]], <vscale x 4 x float> [[TMP20]]) +; CHECK-NEXT: [[INTERLEAVED_MASK9:%.*]] = call <vscale x 16 x i1> @llvm.vector.interleave4.nxv16i1(<vscale x 4 x i1> [[TMP16]], <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i1> [[TMP16]]) +; CHECK-NEXT: call void @llvm.masked.store.nxv16f32.p0(<vscale x 16 x float> [[INTERLEAVED_VEC]], ptr align 4 [[NEXT_GEP]], <vscale x 16 x i1> [[INTERLEAVED_MASK9]]), !alias.scope [[META11:![0-9]+]], !noalias [[META13:![0-9]+]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] +; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N:%.*]] 
= icmp eq i64 [[TMP1]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; +entry: + br label %loop.header + +loop.header: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop.latch ] + %dst.iv = phi ptr [ %dst, %entry ], [ %dst.iv.next, %loop.latch ] + %src.iv = phi ptr [ %src, %entry ], [ %src.iv.next, %loop.latch ] + %mask.iv = phi ptr [ %mask, %entry ], [ %mask.iv.next, %loop.latch ] + %mask.iv.next = getelementptr i8, ptr %mask.iv, i64 1 + %mask.val = load i8, ptr %mask.iv, align 1 + %should.copy = icmp eq i8 %mask.val, 0 + br i1 %should.copy, label %then, label %loop.latch + +then: + %elem0 = load float, ptr %src.iv, align 4 + store float %elem0, ptr %dst.iv, align 4 + %src.1.ptr = getelementptr i8, ptr %src.iv, i64 4 + %s1 = load float, ptr %src.1.ptr, align 4 + %dst.1.ptr = getelementptr i8, ptr %dst.iv, i64 4 + store float %s1, ptr %dst.1.ptr, align 4 + %src.2.ptr = getelementptr i8, ptr %src.iv, i64 8 + %s2 = load float, ptr %src.2.ptr, align 4 + %dst.2.ptr = getelementptr i8, ptr %dst.iv, i64 8 + store float %s2, ptr %dst.2.ptr, align 4 + %src.3.ptr = getelementptr i8, ptr %src.iv, i64 12 + %s3 = load float, ptr %src.3.ptr, align 4 + %dst.3.ptr = getelementptr i8, ptr %dst.iv, i64 12 + store float %s3, ptr %dst.3.ptr, align 4 + br label %loop.latch + +loop.latch: + %iv.next = add i32 %iv, 1 + %src.iv.next = getelementptr i8, ptr %src.iv, i64 16 + %dst.iv.next = getelementptr i8, ptr %dst.iv, i64 16 + %ec = icmp eq i32 %iv, %N + br i1 %ec, label %exit, label %loop.header + +exit: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/WebAssembly/memory-interleave.ll b/llvm/test/Transforms/LoopVectorize/WebAssembly/memory-interleave.ll index c8d20dc..e42e2c7 100644 --- a/llvm/test/Transforms/LoopVectorize/WebAssembly/memory-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/WebAssembly/memory-interleave.ll @@ -7,6 +7,7 @@ target triple = "wasm32-unknown-wasi" %struct.TwoInts = type { i32, i32 } %struct.ThreeInts = type { i32, i32, i32 } %struct.FourInts = type { i32, i32, i32, i32 } +%struct.TwoShorts = type { i16, i16 } %struct.ThreeShorts = type { i16, i16, i16 } %struct.FourShorts = type { i16, i16, i16, i16 } %struct.TwoBytes = type { i8, i8 } @@ -14,6 +15,8 @@ target triple = "wasm32-unknown-wasi" %struct.FourBytes = type { i8, i8, i8, i8 } %struct.FiveBytes = type { i8, i8, i8, i8, i8 } %struct.EightBytes = type { i8, i8, i8, i8, i8, i8, i8, i8 } +%struct.TwoFloats = type { float, float } +%struct.FourFloats = type { float, float, float, float } ; CHECK-LABEL: two_ints_same_op ; CHECK: Cost of 7 for VF 2: INTERLEAVE-GROUP with factor 2 at %10 @@ -1350,3 +1353,1000 @@ define hidden void @scale_uv_row_down2_linear(ptr nocapture noundef readonly %0, 34: ; preds = %6, %4 ret void } + +; CHECK-LABEL: two_floats_same_op +; CHECK: LV: Scalar loop costs: 14 +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: LV: Scalar loop costs: 14. +; CHECK: LV: Vector loop of width 2 costs: 19. +; CHECK: LV: Vector loop of width 4 costs: 15. +; CHECK: LV: Selecting VF: 1. 
+define hidden void @two_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp21.not = icmp eq i32 %N, 0 + br i1 %cmp21.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.022 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.022 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.022 + %1 = load float, ptr %arrayidx1, align 4 + %mul = fmul float %0, %1 + %arrayidx3 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.022 + store float %mul, ptr %arrayidx3, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %mul8 = fmul float %2, %3 + %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4 + store float %mul8, ptr %y10, align 4 + %inc = add nuw i32 %i.022, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_floats_vary_op +; CHECK: LV: Scalar loop costs: 14 +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: LV: Scalar loop costs: 14. +; CHECK: LV: Vector loop of width 2 costs: 19. +; CHECK: LV: Vector loop of width 4 costs: 15. +; CHECK: LV: Selecting VF: 1. 
+define hidden void @two_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp20.not = icmp eq i32 %N, 0 + br i1 %cmp20.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.021 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.021 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.021 + %1 = load float, ptr %arrayidx1, align 4 + %add = fadd float %0, %1 + %arrayidx3 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.021 + store float %add, ptr %arrayidx3, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %sub = fsub float %2, %3 + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4 + store float %sub, ptr %y9, align 4 + %inc = add nuw i32 %i.021, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_bytes_two_floats_same_op +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 11 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 11 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: LV: Scalar loop costs: 18 +; CHECK: LV: Vector loop of width 2 costs: 23 +; CHECK: LV: Vector loop of width 4 costs: 13 +; CHECK: LV: Selecting VF: 4. +define hidden void @two_bytes_two_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp24.not = icmp eq i32 %N, 0 + br i1 %cmp24.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.025 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoBytes, ptr %a, i32 %i.025 + %0 = load i8, ptr %arrayidx, align 1 + %conv = sitofp i8 %0 to float + %arrayidx1 = getelementptr inbounds nuw %struct.TwoBytes, ptr %b, i32 %i.025 + %1 = load i8, ptr %arrayidx1, align 1 + %conv3 = sitofp i8 %1 to float + %mul = fmul float %conv, %conv3 + %arrayidx4 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.025 + store float %mul, ptr %arrayidx4, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 1 + %2 = load i8, ptr %y, align 1 + %conv7 = sitofp i8 %2 to float + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 1 + %3 = load i8, ptr %y9, align 1 + %conv10 = sitofp i8 %3 to float + %mul11 = fmul float %conv7, %conv10 + %y13 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4 + store float %mul11, ptr %y13, align 4 + %inc = add nuw i32 %i.025, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_bytes_two_floats_vary_op +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 11 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 11 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: LV: Scalar loop costs: 18 +; CHECK: LV: Vector loop of width 2 
costs: 23 +; CHECK: LV: Vector loop of width 4 costs: 13 +; CHECK: LV: Selecting VF: 4. +define hidden void @two_bytes_two_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp23.not = icmp eq i32 %N, 0 + br i1 %cmp23.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.024 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoBytes, ptr %a, i32 %i.024 + %0 = load i8, ptr %arrayidx, align 1 + %conv = sitofp i8 %0 to float + %arrayidx1 = getelementptr inbounds nuw %struct.TwoBytes, ptr %b, i32 %i.024 + %1 = load i8, ptr %arrayidx1, align 1 + %conv3 = sitofp i8 %1 to float + %add = fadd float %conv, %conv3 + %arrayidx4 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.024 + store float %add, ptr %arrayidx4, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 1 + %2 = load i8, ptr %y, align 1 + %conv7 = sitofp i8 %2 to float + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 1 + %3 = load i8, ptr %y9, align 1 + %conv10 = sitofp i8 %3 to float + %sub = fsub float %conv7, %conv10 + %y12 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4 + store float %sub, ptr %y12, align 4 + %inc = add nuw i32 %i.024, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_floats_two_bytes_same_op +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 11 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: LV: Scalar loop costs: 16 +; CHECK: LV: Vector loop of width 2 costs: 21 +; CHECK: LV: Vector loop of width 4 costs: 14. +; CHECK: LV: Selecting VF: 4. 
+define hidden void @two_floats_two_bytes_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp22.not = icmp eq i32 %N, 0 + br i1 %cmp22.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.023 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.023 + %1 = load float, ptr %arrayidx1, align 4 + %mul = fmul float %0, %1 + %conv = fptosi float %mul to i8 + %arrayidx3 = getelementptr inbounds nuw %struct.TwoBytes, ptr %res, i32 %i.023 + store i8 %conv, ptr %arrayidx3, align 1 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %mul8 = fmul float %2, %3 + %conv9 = fptosi float %mul8 to i8 + %y11 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 1 + store i8 %conv9, ptr %y11, align 1 + %inc = add nuw i32 %i.023, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_floats_two_bytes_vary_op +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: LV: Scalar loop costs: 16 +; CHECK: LV: Vector loop of width 2 costs: 21 +; CHECK: LV: Vector loop of width 4 costs: 14. +; CHECK: LV: Selecting VF: 4. 
+define hidden void @two_floats_two_bytes_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp21.not = icmp eq i32 %N, 0 + br i1 %cmp21.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.022 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.022 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.022 + %1 = load float, ptr %arrayidx1, align 4 + %add = fadd float %0, %1 + %conv = fptosi float %add to i8 + %arrayidx3 = getelementptr inbounds nuw %struct.TwoBytes, ptr %res, i32 %i.022 + store i8 %conv, ptr %arrayidx3, align 1 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %sub = fsub float %2, %3 + %conv8 = fptosi float %sub to i8 + %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 1 + store i8 %conv8, ptr %y10, align 1 + %inc = add nuw i32 %i.022, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_shorts_two_floats_same_op +; CHECK: Cost of 11 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 11 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: LV: Scalar loop costs: 18 +; CHECK: LV: Vector loop of width 2 costs: 22 +; CHECK: LV: Vector loop of width 4 costs: 11. +; CHECK: LV: Selecting VF: 4. 
+define hidden void @two_shorts_two_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp24.not = icmp eq i32 %N, 0 + br i1 %cmp24.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.025 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoShorts, ptr %a, i32 %i.025 + %0 = load i16, ptr %arrayidx, align 2 + %conv = sitofp i16 %0 to float + %arrayidx1 = getelementptr inbounds nuw %struct.TwoShorts, ptr %b, i32 %i.025 + %1 = load i16, ptr %arrayidx1, align 2 + %conv3 = sitofp i16 %1 to float + %mul = fmul float %conv, %conv3 + %arrayidx4 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.025 + store float %mul, ptr %arrayidx4, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2 + %2 = load i16, ptr %y, align 2 + %conv7 = sitofp i16 %2 to float + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2 + %3 = load i16, ptr %y9, align 2 + %conv10 = sitofp i16 %3 to float + %mul11 = fmul float %conv7, %conv10 + %y13 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4 + store float %mul11, ptr %y13, align 4 + %inc = add nuw i32 %i.025, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_shorts_two_floats_vary_op +; CHECK: Cost of 11 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 11 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: LV: Scalar loop costs: 18 +; CHECK: LV: Vector loop of width 2 costs: 22 +; CHECK: LV: Vector loop of width 4 costs: 11. +; CHECK: LV: Selecting VF: 4. 
+define hidden void @two_shorts_two_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp23.not = icmp eq i32 %N, 0 + br i1 %cmp23.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.024 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoShorts, ptr %a, i32 %i.024 + %0 = load i16, ptr %arrayidx, align 2 + %conv = sitofp i16 %0 to float + %arrayidx1 = getelementptr inbounds nuw %struct.TwoShorts, ptr %b, i32 %i.024 + %1 = load i16, ptr %arrayidx1, align 2 + %conv3 = sitofp i16 %1 to float + %add = fadd float %conv, %conv3 + %arrayidx4 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.024 + store float %add, ptr %arrayidx4, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2 + %2 = load i16, ptr %y, align 2 + %conv7 = sitofp i16 %2 to float + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2 + %3 = load i16, ptr %y9, align 2 + %conv10 = sitofp i16 %3 to float + %sub = fsub float %conv7, %conv10 + %y12 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4 + store float %sub, ptr %y12, align 4 + %inc = add nuw i32 %i.024, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_floats_two_shorts_same_op +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 11 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: LV: Scalar loop costs: 16 +; CHECK: LV: Vector loop of width 2 costs: 20 +; CHECK: LV: Vector loop of width 4 costs: 13. +; CHECK: LV: Selecting VF: 4. 
+define hidden void @two_floats_two_shorts_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp22.not = icmp eq i32 %N, 0 + br i1 %cmp22.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.023 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.023 + %1 = load float, ptr %arrayidx1, align 4 + %mul = fmul float %0, %1 + %conv = fptosi float %mul to i16 + %arrayidx3 = getelementptr inbounds nuw %struct.TwoShorts, ptr %res, i32 %i.023 + store i16 %conv, ptr %arrayidx3, align 2 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %mul8 = fmul float %2, %3 + %conv9 = fptosi float %mul8 to i16 + %y11 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2 + store i16 %conv9, ptr %y11, align 2 + %inc = add nuw i32 %i.023, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: two_floats_two_shorts_vary_op +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 11 for VF 2: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 2 +; CHECK: LV: Scalar loop costs: 16 +; CHECK: LV: Vector loop of width 2 costs: 20 +; CHECK: LV: Vector loop of width 4 costs: 13. +; CHECK: LV: Selecting VF: 4. 
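The two_floats_two_shorts_* functions (the vary_op body is defined next) exercise the opposite direction: the FP arithmetic happens on the float fields and each result is narrowed to i16 with fptosi. Using the same struct definitions as the sketch above, and with the same caveat that the names are assumed, the source shape is roughly:

/* Sketch of @two_floats_two_shorts_vary_op: operate on float fields,
   then truncate each result into a short field of the destination struct. */
void two_floats_two_shorts_vary_op(const struct TwoFloats *a,
                                   const struct TwoFloats *b,
                                   struct TwoShorts *res, unsigned N) {
  for (unsigned i = 0; i < N; ++i) {
    res[i].x = (short)(a[i].x + b[i].x); /* fadd then fptosi */
    res[i].y = (short)(a[i].y - b[i].y); /* fsub then fptosi */
  }
}

The four-element variants further down (FourBytes, FourShorts, FourFloats) follow the same pattern with factor-4 interleave groups.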
+define hidden void @two_floats_two_shorts_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp21.not = icmp eq i32 %N, 0 + br i1 %cmp21.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.022 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.022 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.022 + %1 = load float, ptr %arrayidx1, align 4 + %add = fadd float %0, %1 + %conv = fptosi float %add to i16 + %arrayidx3 = getelementptr inbounds nuw %struct.TwoShorts, ptr %res, i32 %i.022 + store i16 %conv, ptr %arrayidx3, align 2 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %sub = fsub float %2, %3 + %conv8 = fptosi float %sub to i16 + %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2 + store i16 %conv8, ptr %y10, align 2 + %inc = add nuw i32 %i.022, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_floats_same_op +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: LV: Scalar loop costs: 24 +; CHECK: LV: Vector loop of width 2 costs: 33 +; CHECK: LV: Vector loop of width 4 costs: 30 +; CHECK: LV: Selecting VF: 4 +define hidden void @four_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp45.not = icmp eq i32 %N, 0 + br i1 %cmp45.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.046 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.046 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.046 + %1 = load float, ptr %arrayidx1, align 4 + %mul = fmul float %0, %1 + %arrayidx3 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.046 + store float %mul, ptr %arrayidx3, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %mul8 = fmul float %2, %3 + %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4 + store float %mul8, ptr %y10, align 4 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8 + %4 = load float, ptr %z, align 4 + %z13 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8 + %5 = load float, ptr %z13, align 4 + %mul14 = fmul float %4, %5 + %z16 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 8 + store float %mul14, ptr %z16, align 4 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12 + %6 = load float, ptr %w, align 4 + %w19 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12 + %7 = load float, ptr %w19, align 4 + %mul20 = fmul float %6, %7 + %w22 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 12 + store float %mul20, ptr 
%w22, align 4 + %inc = add nuw i32 %i.046, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_floats_vary_op +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: LV: Scalar loop costs: 24 +; CHECK: LV: Vector loop of width 2 costs: 33 +; CHECK: LV: Vector loop of width 4 costs: 30 +; CHECK: LV: Selecting VF: 1 +define hidden void @four_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp42.not = icmp eq i32 %N, 0 + br i1 %cmp42.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.043 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.043 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.043 + %1 = load float, ptr %arrayidx1, align 4 + %add = fadd float %0, %1 + %arrayidx3 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.043 + store float %add, ptr %arrayidx3, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %sub = fsub float %2, %3 + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4 + store float %sub, ptr %y9, align 4 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8 + %4 = load float, ptr %z, align 4 + %z12 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8 + %5 = load float, ptr %z12, align 4 + %mul = fmul float %4, %5 + %z14 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 8 + store float %mul, ptr %z14, align 4 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12 + %6 = load float, ptr %w, align 4 + %w17 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12 + %7 = load float, ptr %w17, align 4 + %div = fdiv float %6, %7 + %w19 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 12 + store float %div, ptr %w19, align 4 + %inc = add nuw i32 %i.043, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_bytes_four_floats_same_op +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: LV: Scalar loop costs: 32 +; CHECK: LV: Vector loop of width 2 costs: 43 +; CHECK: LV: Vector loop of width 4 costs: 23 +; CHECK: LV: Selecting VF: 4 +define hidden void @four_bytes_four_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp52.not = icmp eq i32 %N, 0 + br i1 %cmp52.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.053 = phi i32 [ %inc, %for.body ], [ 0, 
%entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourBytes, ptr %a, i32 %i.053 + %0 = load i8, ptr %arrayidx, align 1 + %conv = sitofp i8 %0 to float + %arrayidx1 = getelementptr inbounds nuw %struct.FourBytes, ptr %b, i32 %i.053 + %1 = load i8, ptr %arrayidx1, align 1 + %conv3 = sitofp i8 %1 to float + %mul = fmul float %conv, %conv3 + %arrayidx4 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.053 + store float %mul, ptr %arrayidx4, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 1 + %2 = load i8, ptr %y, align 1 + %conv7 = sitofp i8 %2 to float + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 1 + %3 = load i8, ptr %y9, align 1 + %conv10 = sitofp i8 %3 to float + %mul11 = fmul float %conv7, %conv10 + %y13 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4 + store float %mul11, ptr %y13, align 4 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2 + %4 = load i8, ptr %z, align 1 + %conv15 = sitofp i8 %4 to float + %z17 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2 + %5 = load i8, ptr %z17, align 1 + %conv18 = sitofp i8 %5 to float + %mul19 = fmul float %conv15, %conv18 + %z21 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 8 + store float %mul19, ptr %z21, align 4 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 3 + %6 = load i8, ptr %w, align 1 + %conv23 = sitofp i8 %6 to float + %w25 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 3 + %7 = load i8, ptr %w25, align 1 + %conv26 = sitofp i8 %7 to float + %mul27 = fmul float %conv23, %conv26 + %w29 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 12 + store float %mul27, ptr %w29, align 4 + %inc = add nuw i32 %i.053, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_bytes_four_floats_vary_op +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: LV: Scalar loop costs: 32 +; CHECK: LV: Vector loop of width 2 costs: 43 +; CHECK: LV: Vector loop of width 4 costs: 23 +; CHECK: LV: Selecting VF: 4 +define hidden void @four_bytes_four_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp49.not = icmp eq i32 %N, 0 + br i1 %cmp49.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.050 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourBytes, ptr %a, i32 %i.050 + %0 = load i8, ptr %arrayidx, align 1 + %conv = sitofp i8 %0 to float + %arrayidx1 = getelementptr inbounds nuw %struct.FourBytes, ptr %b, i32 %i.050 + %1 = load i8, ptr %arrayidx1, align 1 + %conv3 = sitofp i8 %1 to float + %mul = fmul float %conv, %conv3 + %arrayidx4 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.050 + store float %mul, ptr %arrayidx4, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 1 + %2 = load i8, ptr %y, align 1 + %conv7 = sitofp i8 %2 to float + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 1 + %3 = load i8, ptr %y9, align 1 + %conv10 = sitofp i8 %3 to float + %add = fadd float %conv7, %conv10 + %y12 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4 + store float %add, ptr %y12, align 4 
+ %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2 + %4 = load i8, ptr %z, align 1 + %conv14 = sitofp i8 %4 to float + %z16 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2 + %5 = load i8, ptr %z16, align 1 + %conv17 = sitofp i8 %5 to float + %div = fdiv float %conv14, %conv17 + %z19 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 8 + store float %div, ptr %z19, align 4 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 3 + %6 = load i8, ptr %w, align 1 + %conv21 = sitofp i8 %6 to float + %w23 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 3 + %7 = load i8, ptr %w23, align 1 + %conv24 = sitofp i8 %7 to float + %sub = fsub float %conv21, %conv24 + %w26 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 12 + store float %sub, ptr %w26, align 4 + %inc = add nuw i32 %i.050, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_floats_four_bytes_same_op +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: LV: Scalar loop costs: 28 +; CHECK: LV: Vector loop of width 2 costs: 38 +; CHECK: LV: Vector loop of width 4 costs: 26 +; CHECK: LV: Selecting VF: 4 +define hidden void @four_floats_four_bytes_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp48.not = icmp eq i32 %N, 0 + br i1 %cmp48.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.049 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.049 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.049 + %1 = load float, ptr %arrayidx1, align 4 + %mul = fmul float %0, %1 + %conv = fptosi float %mul to i8 + %arrayidx3 = getelementptr inbounds nuw %struct.FourBytes, ptr %res, i32 %i.049 + store i8 %conv, ptr %arrayidx3, align 1 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %mul8 = fmul float %2, %3 + %conv9 = fptosi float %mul8 to i8 + %y11 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 1 + store i8 %conv9, ptr %y11, align 1 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8 + %4 = load float, ptr %z, align 4 + %z14 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8 + %5 = load float, ptr %z14, align 4 + %mul15 = fmul float %4, %5 + %conv16 = fptosi float %mul15 to i8 + %z18 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2 + store i8 %conv16, ptr %z18, align 1 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12 + %6 = load float, ptr %w, align 4 + %w21 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12 + %7 = load float, ptr %w21, align 4 + %mul22 = fmul float %6, %7 + %conv23 = fptosi float %mul22 to i8 + %w25 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 3 + store i8 %conv23, ptr %w25, align 1 + %inc = add nuw i32 %i.049, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: 
four_floats_four_bytes_vary_op +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: LV: Scalar loop costs: 28 +; CHECK: LV: Vector loop of width 2 costs: 38 +; CHECK: LV: Vector loop of width 4 costs: 26 +; CHECK: LV: Selecting VF: 4 +define hidden void @four_floats_four_bytes_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp45.not = icmp eq i32 %N, 0 + br i1 %cmp45.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.046 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.046 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.046 + %1 = load float, ptr %arrayidx1, align 4 + %mul = fmul float %0, %1 + %conv = fptosi float %mul to i8 + %arrayidx3 = getelementptr inbounds nuw %struct.FourBytes, ptr %res, i32 %i.046 + store i8 %conv, ptr %arrayidx3, align 1 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %add = fadd float %2, %3 + %conv8 = fptosi float %add to i8 + %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 1 + store i8 %conv8, ptr %y10, align 1 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8 + %4 = load float, ptr %z, align 4 + %z13 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8 + %5 = load float, ptr %z13, align 4 + %div = fdiv float %4, %5 + %conv14 = fptosi float %div to i8 + %z16 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2 + store i8 %conv14, ptr %z16, align 1 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12 + %6 = load float, ptr %w, align 4 + %w19 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12 + %7 = load float, ptr %w19, align 4 + %sub = fsub float %6, %7 + %conv20 = fptosi float %sub to i8 + %w22 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 3 + store i8 %conv20, ptr %w22, align 1 + %inc = add nuw i32 %i.046, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_shorts_four_floats_same_op +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: LV: Scalar loop costs: 32 +; CHECK: LV: Vector loop of width 2 costs: 37 +; CHECK: LV: Vector loop of width 4 costs: 23 +; CHECK: LV: Selecting VF: 4 +define hidden void @four_shorts_four_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp52.not = icmp eq i32 %N, 0 + br i1 %cmp52.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, 
%for.body + %i.053 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourShorts, ptr %a, i32 %i.053 + %0 = load i16, ptr %arrayidx, align 2 + %conv = sitofp i16 %0 to float + %arrayidx1 = getelementptr inbounds nuw %struct.FourShorts, ptr %b, i32 %i.053 + %1 = load i16, ptr %arrayidx1, align 2 + %conv3 = sitofp i16 %1 to float + %mul = fmul float %conv, %conv3 + %arrayidx4 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.053 + store float %mul, ptr %arrayidx4, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2 + %2 = load i16, ptr %y, align 2 + %conv7 = sitofp i16 %2 to float + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2 + %3 = load i16, ptr %y9, align 2 + %conv10 = sitofp i16 %3 to float + %mul11 = fmul float %conv7, %conv10 + %y13 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4 + store float %mul11, ptr %y13, align 4 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %4 = load i16, ptr %z, align 2 + %conv15 = sitofp i16 %4 to float + %z17 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %5 = load i16, ptr %z17, align 2 + %conv18 = sitofp i16 %5 to float + %mul19 = fmul float %conv15, %conv18 + %z21 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 8 + store float %mul19, ptr %z21, align 4 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 6 + %6 = load i16, ptr %w, align 2 + %conv23 = sitofp i16 %6 to float + %w25 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 6 + %7 = load i16, ptr %w25, align 2 + %conv26 = sitofp i16 %7 to float + %mul27 = fmul float %conv23, %conv26 + %w29 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 12 + store float %mul27, ptr %w29, align 4 + %inc = add nuw i32 %i.053, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_shorts_four_floats_vary_op +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: LV: Scalar loop costs: 32 +; CHECK: LV: Vector loop of width 2 costs: 37 +; CHECK: LV: Vector loop of width 4 costs: 23 +; CHECK: LV: Selecting VF: 4 +define hidden void @four_shorts_four_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp49.not = icmp eq i32 %N, 0 + br i1 %cmp49.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.050 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourShorts, ptr %a, i32 %i.050 + %0 = load i16, ptr %arrayidx, align 2 + %conv = sitofp i16 %0 to float + %arrayidx1 = getelementptr inbounds nuw %struct.FourShorts, ptr %b, i32 %i.050 + %1 = load i16, ptr %arrayidx1, align 2 + %conv3 = sitofp i16 %1 to float + %mul = fmul float %conv, %conv3 + %arrayidx4 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.050 + store float %mul, ptr %arrayidx4, align 4 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2 + %2 = load i16, ptr %y, align 2 + %conv7 = sitofp i16 %2 to float + %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, 
i32 2 + %3 = load i16, ptr %y9, align 2 + %conv10 = sitofp i16 %3 to float + %add = fadd float %conv7, %conv10 + %y12 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4 + store float %add, ptr %y12, align 4 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %4 = load i16, ptr %z, align 2 + %conv14 = sitofp i16 %4 to float + %z16 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %5 = load i16, ptr %z16, align 2 + %conv17 = sitofp i16 %5 to float + %div = fdiv float %conv14, %conv17 + %z19 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 8 + store float %div, ptr %z19, align 4 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 6 + %6 = load i16, ptr %w, align 2 + %conv21 = sitofp i16 %6 to float + %w23 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 6 + %7 = load i16, ptr %w23, align 2 + %conv24 = sitofp i16 %7 to float + %sub = fsub float %conv21, %conv24 + %w26 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 12 + store float %sub, ptr %w26, align 4 + %inc = add nuw i32 %i.050, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_floats_four_shorts_same_op +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: LV: Scalar loop costs: 28 +; CHECK: LV: Vector loop of width 2 costs: 35 +; CHECK: LV: Vector loop of width 4 costs: 26 +; CHECK: LV: Selecting VF: 4 +define hidden void @four_floats_four_shorts_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp48.not = icmp eq i32 %N, 0 + br i1 %cmp48.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.049 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.049 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.049 + %1 = load float, ptr %arrayidx1, align 4 + %mul = fmul float %0, %1 + %conv = fptosi float %mul to i16 + %arrayidx3 = getelementptr inbounds nuw %struct.FourShorts, ptr %res, i32 %i.049 + store i16 %conv, ptr %arrayidx3, align 2 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %mul8 = fmul float %2, %3 + %conv9 = fptosi float %mul8 to i16 + %y11 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2 + store i16 %conv9, ptr %y11, align 2 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8 + %4 = load float, ptr %z, align 4 + %z14 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8 + %5 = load float, ptr %z14, align 4 + %mul15 = fmul float %4, %5 + %conv16 = fptosi float %mul15 to i16 + %z18 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4 + store i16 %conv16, ptr %z18, align 2 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12 + %6 = load float, ptr %w, align 4 + %w21 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12 + %7 = load float, ptr %w21, align 4 + %mul22 = fmul float %6, %7 + 
%conv23 = fptosi float %mul22 to i16 + %w25 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 6 + store i16 %conv23, ptr %w25, align 2 + %inc = add nuw i32 %i.049, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: four_floats_four_shorts_vary_op +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 2: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4 +; CHECK: LV: Scalar loop costs: 28 +; CHECK: LV: Vector loop of width 2 costs: 35 +; CHECK: LV: Vector loop of width 4 costs: 26 +; CHECK: LV: Selecting VF: 4 +define hidden void @four_floats_four_shorts_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) { +entry: + %cmp45.not = icmp eq i32 %N, 0 + br i1 %cmp45.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: ; preds = %for.body, %entry + ret void + +for.body: ; preds = %entry, %for.body + %i.046 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.046 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.046 + %1 = load float, ptr %arrayidx1, align 4 + %mul = fmul float %0, %1 + %conv = fptosi float %mul to i16 + %arrayidx3 = getelementptr inbounds nuw %struct.FourShorts, ptr %res, i32 %i.046 + store i16 %conv, ptr %arrayidx3, align 2 + %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4 + %2 = load float, ptr %y, align 4 + %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4 + %3 = load float, ptr %y7, align 4 + %add = fadd float %2, %3 + %conv8 = fptosi float %add to i16 + %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2 + store i16 %conv8, ptr %y10, align 2 + %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8 + %4 = load float, ptr %z, align 4 + %z13 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8 + %5 = load float, ptr %z13, align 4 + %div = fdiv float %4, %5 + %conv14 = fptosi float %div to i16 + %z16 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4 + store i16 %conv14, ptr %z16, align 2 + %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12 + %6 = load float, ptr %w, align 4 + %w19 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12 + %7 = load float, ptr %w19, align 4 + %sub = fsub float %6, %7 + %conv20 = fptosi float %sub to i16 + %w22 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 6 + store i16 %conv20, ptr %w22, align 2 + %inc = add nuw i32 %i.046, 1 + %exitcond.not = icmp eq i32 %inc, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} diff --git a/llvm/test/Transforms/SimplifyCFG/AArch64/prefer-fma.ll b/llvm/test/Transforms/SimplifyCFG/AArch64/prefer-fma.ll index 0f18dc2..46e38d9 100644 --- a/llvm/test/Transforms/SimplifyCFG/AArch64/prefer-fma.ll +++ b/llvm/test/Transforms/SimplifyCFG/AArch64/prefer-fma.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -mtriple=aarch64-linux-gnu -passes=simplifycfg -simplifycfg-require-and-preserve-domtree=1 -hoist-common-insts=true -enable-unsafe-fp-math -S >%t +; RUN: opt < %s -mtriple=aarch64-linux-gnu -passes=simplifycfg -simplifycfg-require-and-preserve-domtree=1 -hoist-common-insts=true -S >%t ; RUN: 
FileCheck %s < %t ; ModuleID = 't.cc' diff --git a/llvm/test/Transforms/SimplifyCFG/PowerPC/prefer-fma.ll b/llvm/test/Transforms/SimplifyCFG/PowerPC/prefer-fma.ll index c7bc43e1..b61d659 100644 --- a/llvm/test/Transforms/SimplifyCFG/PowerPC/prefer-fma.ll +++ b/llvm/test/Transforms/SimplifyCFG/PowerPC/prefer-fma.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -mtriple=powerpc64le-unknown-linux-gnu -passes=simplifycfg -simplifycfg-require-and-preserve-domtree=1 -hoist-common-insts=true -enable-unsafe-fp-math -S | \ +; RUN: opt < %s -mtriple=powerpc64le-unknown-linux-gnu -passes=simplifycfg -simplifycfg-require-and-preserve-domtree=1 -hoist-common-insts=true -S | \ ; RUN: FileCheck %s ; This case is copied from test/Transforms/SimplifyCFG/AArch64/ diff --git a/llvm/test/Transforms/WholeProgramDevirt/speculative-devirt-single-impl.ll b/llvm/test/Transforms/WholeProgramDevirt/speculative-devirt-single-impl.ll new file mode 100644 index 0000000..10566ae --- /dev/null +++ b/llvm/test/Transforms/WholeProgramDevirt/speculative-devirt-single-impl.ll @@ -0,0 +1,132 @@ +; -stats requires asserts +; REQUIRES: asserts + +; Check that we can still devirtualize outside LTO mode when speculative devirtualization is enabled. +; Check that we skip devirtualization for empty functions in speculative devirtualization mode + +; RUN: opt -S -passes=wholeprogramdevirt -devirtualize-speculatively \ +; RUN: -pass-remarks=wholeprogramdevirt -stats %s 2>&1 | FileCheck %s + +target datalayout = "e-p:64:64" +target triple = "x86_64-unknown-linux-gnu" + +; CHECK: remark: devirt-single.cc:30:32: single-impl: devirtualized a call to vf +; CHECK: remark: devirt-single.cc:41:32: single-impl: devirtualized a call to vf +; CHECK: remark: devirt-single.cc:51:32: single-impl: devirtualized a call to vf +; CHECK: remark: devirt-single.cc:13:0: devirtualized vf +; CHECK-NOT: devirtualized + +@vt1 = constant [1 x ptr] [ptr @vf], !type !8 +@vt2 = constant [1 x ptr] [ptr @vf_empty], !type !12 + +define i1 @vf(ptr %this) #0 !dbg !7 { + ret i1 true +} + +; This should NOT be devirtualized because during non-lto empty functions +; are skipped. 
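(Aside: the single-impl call sites checked below, @call, @call2 and @call3, are expected to be rewritten into a guarded direct call rather than an unconditional one, which is what the if.true.direct_targ / if.false.orig_indirect labels refer to. A minimal C sketch of that shape, with illustrative names only — vfn_t, vf_impl and guarded_call are not taken from the pass output:

typedef int (*vfn_t)(void *obj);

int vf_impl(void *obj) { return 1; }  /* stands in for the single implementation @vf */

int guarded_call(void *obj, vfn_t fptr) {
  if (fptr == vf_impl)      /* if.true.direct_targ: loaded pointer matches the impl */
    return vf_impl(obj);    /* direct call, now visible to the inliner              */
  return fptr(obj);         /* if.false.orig_indirect: keep the original call       */
}

The fallback branch is what keeps the transformation correct when whole-program visibility cannot be assumed, which is why this mode can devirtualize outside LTO.)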
+define void @vf_empty(ptr %this) !dbg !11 { + ret void +} + +; CHECK: define void @call +define void @call(ptr %obj) #1 !dbg !5 { + %vtable = load ptr, ptr %obj + %p = call i1 @llvm.public.type.test(ptr %vtable, metadata !"typeid") + call void @llvm.assume(i1 %p) + %fptr = load ptr, ptr %vtable + ; CHECK: if.true.direct_targ: + ; CHECK: call i1 @vf( + ; CHECK: if.false.orig_indirect: + ; CHECK: call i1 %fptr( + call i1 %fptr(ptr %obj), !dbg !6 + ret void +} + + +; CHECK: define void @call1 +define void @call1(ptr %obj) #1 !dbg !9 { + %vtable = load ptr, ptr %obj + %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid1") + call void @llvm.assume(i1 %p) + %fptr = load ptr, ptr %vtable, align 8 + ; CHECK: call i1 %fptr + %1 = call i1 %fptr(ptr %obj), !dbg !10 + ret void +} +declare ptr @llvm.load.relative.i32(ptr, i32) + +@vt3 = private unnamed_addr constant [1 x i32] [ + i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf to i64), i64 ptrtoint (ptr @vt3 to i64)) to i32) +], align 4, !type !15 + +; CHECK: define void @call2 +define void @call2(ptr %obj) #1 !dbg !13 { + %vtable = load ptr, ptr %obj + %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid2") + call void @llvm.assume(i1 %p) + %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 0) + ; CHECK: if.true.direct_targ: + ; CHECK: call i1 @vf( + ; CHECK: if.false.orig_indirect: + ; CHECK: call i1 %fptr( + call i1 %fptr(ptr %obj), !dbg !14 + ret void +} + +@_ZTV1A.local = private unnamed_addr constant { [3 x i32] } { [3 x i32] [ + i32 0, ; offset to top + i32 0, ; rtti + i32 trunc (i64 sub (i64 ptrtoint (ptr dso_local_equivalent @vf to i64), i64 ptrtoint (ptr getelementptr inbounds ({ [3 x i32] }, ptr @_ZTV1A.local, i32 0, i32 0, i32 2) to i64)) to i32) ; vf_emptyunc offset +] }, align 4, !type !18 + +; CHECK: define void @call3 +define void @call3(ptr %obj) #1 !dbg !16 { + %vtable = load ptr, ptr %obj + %p = call i1 @llvm.type.test(ptr %vtable, metadata !"typeid3") + call void @llvm.assume(i1 %p) + %fptr = call ptr @llvm.load.relative.i32(ptr %vtable, i32 8) + ; CHECK: if.true.direct_targ: + ; CHECK: call i1 @vf( + ; CHECK: if.false.orig_indirect: + ; CHECK: call i1 %fptr( + call i1 %fptr(ptr %obj), !dbg !17 + ret void +} + + +declare i1 @llvm.type.test(ptr, metadata) +declare i1 @llvm.public.type.test(ptr, metadata) +declare void @llvm.assume(i1) + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!2, !3} +!llvm.ident = !{!4} + +!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 4.0.0 (trunk 278098)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug) +!1 = !DIFile(filename: "devirt-single.cc", directory: ".") +!2 = !{i32 2, !"Dwarf Version", i32 4} +!3 = !{i32 2, !"Debug Info Version", i32 3} +!4 = !{!"clang version 4.0.0 (trunk 278098)"} +!5 = distinct !DISubprogram(name: "call", linkageName: "_Z4callPv", scope: !1, file: !1, line: 29, isLocal: false, isDefinition: true, scopeLine: 9, flags: DIFlagPrototyped, isOptimized: false, unit: !0) +!6 = !DILocation(line: 30, column: 32, scope: !5) +!7 = distinct !DISubprogram(name: "vf", linkageName: "_ZN3vt12vfEv", scope: !1, file: !1, line: 13, isLocal: false, isDefinition: true, scopeLine: 13, flags: DIFlagPrototyped, isOptimized: false, unit: !0) +!8 = !{i32 0, !"typeid"} + +!9 = distinct !DISubprogram(name: "call1", linkageName: "_Z5call1Pv", scope: !1, file: !1, line: 31, isLocal: false, isDefinition: true, scopeLine: 9, flags: DIFlagPrototyped, isOptimized: false, unit: !0) +!10 = !DILocation(line: 35, 
column: 32, scope: !9) +!11 = distinct !DISubprogram(name: "vf_empty", linkageName: "_ZN3vt18vf_emptyEv", scope: !1, file: !1, line: 23, isLocal: false, isDefinition: true, scopeLine: 23, flags: DIFlagPrototyped, isOptimized: false, unit: !0) +!12 = !{i32 0, !"typeid1"} + +!13 = distinct !DISubprogram(name: "call2", linkageName: "_Z5call2Pv", scope: !1, file: !1, line: 40, isLocal: false, isDefinition: true, scopeLine: 9, flags: DIFlagPrototyped, isOptimized: false, unit: !0) +!14 = !DILocation(line: 41, column: 32, scope: !13) +!15 = !{i32 0, !"typeid2"} + +!16 = distinct !DISubprogram(name: "call3", linkageName: "_Z5call3Pv", scope: !1, file: !1, line: 50, isLocal: false, isDefinition: true, scopeLine: 9, flags: DIFlagPrototyped, isOptimized: false, unit: !0) +!17 = !DILocation(line: 51, column: 32, scope: !16) +!18 = !{i32 0, !"typeid3"} + + + +; CHECK: 1 wholeprogramdevirt - Number of whole program devirtualization targets +; CHECK: 3 wholeprogramdevirt - Number of single implementation devirtualizations diff --git a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll index d8f5c91..8327e1c 100644 --- a/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll +++ b/llvm/test/Transforms/WholeProgramDevirt/virtual-const-prop-check.ll @@ -11,6 +11,9 @@ ; Check wildcard ; RUN: opt -S -passes=wholeprogramdevirt -whole-program-visibility -pass-remarks=wholeprogramdevirt -wholeprogramdevirt-skip=vf?i1 %s 2>&1 | FileCheck %s --check-prefix=SKIP +; Check that no stats are reported in speculative devirtualization mode as the virtual const prop is disabled. +; RUN: opt -S -passes=wholeprogramdevirt -devirtualize-speculatively -stats %s 2>&1 | FileCheck %s --check-prefix=CHECK-SPECULATIVE-WPD + target datalayout = "e-p:64:64" target triple = "x86_64-unknown-linux-gnu" @@ -225,3 +228,7 @@ declare ptr @llvm.load.relative.i32(ptr, i32) ; CHECK: 2 wholeprogramdevirt - Number of unique return value optimizations ; CHECK: 2 wholeprogramdevirt - Number of virtual constant propagations ; CHECK: 2 wholeprogramdevirt - Number of 1 bit virtual constant propagations + +; CHECK-SPECULATIVE-WPD-NOT: 0 wholeprogramdevirt - Number of unique return value optimizations +; CHECK-SPECULATIVE-WPD-NOT: 0 wholeprogramdevirt - Number of virtual constant propagations +; CHECK-SPECULATIVE-WPD-NOT: 0 wholeprogramdevirt - Number of 1 bit virtual constant propagations diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx1.s b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx1.s index 1ffe533..d1df304 100644 --- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx1.s +++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx1.s @@ -1403,8 +1403,8 @@ vzeroupper # CHECK-NEXT: 1 8 0.50 * vpblendvb %xmm3, (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 1 0.25 vpblendw $11, %xmm0, %xmm1, %xmm2 # CHECK-NEXT: 1 8 0.50 * vpblendw $11, (%rax), %xmm1, %xmm2 -# CHECK-NEXT: 4 4 2.00 vpclmulqdq $11, %xmm0, %xmm1, %xmm2 -# CHECK-NEXT: 4 11 2.00 * vpclmulqdq $11, (%rax), %xmm1, %xmm2 +# CHECK-NEXT: 4 4 1.50 vpclmulqdq $11, %xmm0, %xmm1, %xmm2 +# CHECK-NEXT: 4 11 1.50 * vpclmulqdq $11, (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 1 0.25 vpcmpeqb %xmm0, %xmm1, %xmm2 # CHECK-NEXT: 1 8 0.50 * vpcmpeqb (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 1 0.25 vpcmpeqd %xmm0, %xmm1, %xmm2 @@ -1415,8 +1415,8 @@ vzeroupper # CHECK-NEXT: 1 8 0.50 * vpcmpeqw (%rax), %xmm1, %xmm2 # CHECK-NEXT: 8 6 3.00 vpcmpestri $1, %xmm0, %xmm2 # CHECK-NEXT: 12 13 3.00 * vpcmpestri $1, (%rax), %xmm2 
-# CHECK-NEXT: 7 6 3.00 vpcmpestrm $1, %xmm0, %xmm2 -# CHECK-NEXT: 12 13 3.00 * vpcmpestrm $1, (%rax), %xmm2 +# CHECK-NEXT: 7 7 3.00 vpcmpestrm $1, %xmm0, %xmm2 +# CHECK-NEXT: 12 14 3.00 * vpcmpestrm $1, (%rax), %xmm2 # CHECK-NEXT: 1 1 0.25 vpcmpgtb %xmm0, %xmm1, %xmm2 # CHECK-NEXT: 1 8 0.50 * vpcmpgtb (%rax), %xmm1, %xmm2 # CHECK-NEXT: 1 1 0.25 vpcmpgtd %xmm0, %xmm1, %xmm2 @@ -1427,8 +1427,8 @@ vzeroupper # CHECK-NEXT: 1 8 0.50 * vpcmpgtw (%rax), %xmm1, %xmm2 # CHECK-NEXT: 4 2 2.00 vpcmpistri $1, %xmm0, %xmm2 # CHECK-NEXT: 4 9 2.00 * vpcmpistri $1, (%rax), %xmm2 -# CHECK-NEXT: 3 6 2.00 vpcmpistrm $1, %xmm0, %xmm2 -# CHECK-NEXT: 4 13 2.00 * vpcmpistrm $1, (%rax), %xmm2 +# CHECK-NEXT: 3 7 2.00 vpcmpistrm $1, %xmm0, %xmm2 +# CHECK-NEXT: 4 14 2.00 * vpcmpistrm $1, (%rax), %xmm2 # CHECK-NEXT: 1 3 1.00 vperm2f128 $1, %ymm0, %ymm1, %ymm2 # CHECK-NEXT: 1 10 1.00 * vperm2f128 $1, (%rax), %ymm1, %ymm2 # CHECK-NEXT: 1 1 0.50 vpermilpd $1, %xmm0, %xmm2 @@ -1749,7 +1749,7 @@ vzeroupper # CHECK: Resource pressure per iteration: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] -# CHECK-NEXT: 1.33 1.33 1.33 16.50 16.50 16.50 16.50 - 205.25 393.58 268.08 158.08 208.50 208.50 65.00 119.67 119.67 119.67 107.00 107.00 107.00 19.00 19.00 +# CHECK-NEXT: 1.33 1.33 1.33 16.50 16.50 16.50 16.50 - 204.25 392.58 268.08 158.08 208.50 208.50 65.00 119.67 119.67 119.67 107.00 107.00 107.00 19.00 19.00 # CHECK: Resource pressure by instruction: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] Instructions: @@ -2126,8 +2126,8 @@ vzeroupper # CHECK-NEXT: - - - - - - - - 0.50 - - 0.50 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpblendvb %xmm3, (%rax), %xmm1, %xmm2 # CHECK-NEXT: - - - - - - - - 0.25 0.25 0.25 0.25 - - - - - - - - - - - vpblendw $11, %xmm0, %xmm1, %xmm2 # CHECK-NEXT: - - - - - - - - 0.25 0.25 0.25 0.25 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpblendw $11, (%rax), %xmm1, %xmm2 -# CHECK-NEXT: - - - - - - - - 2.00 2.00 - - - - - - - - - - - - - vpclmulqdq $11, %xmm0, %xmm1, %xmm2 -# CHECK-NEXT: - - - - - - - - 2.00 2.00 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpclmulqdq $11, (%rax), %xmm1, %xmm2 +# CHECK-NEXT: - - - - - - - - 1.50 1.50 - - - - - - - - - - - - - vpclmulqdq $11, %xmm0, %xmm1, %xmm2 +# CHECK-NEXT: - - - - - - - - 1.50 1.50 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpclmulqdq $11, (%rax), %xmm1, %xmm2 # CHECK-NEXT: - - - - - - - - 0.25 0.25 0.25 0.25 - - - - - - - - - - - vpcmpeqb %xmm0, %xmm1, %xmm2 # CHECK-NEXT: - - - - - - - - 0.25 0.25 0.25 0.25 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpcmpeqb (%rax), %xmm1, %xmm2 # CHECK-NEXT: - - - - - - - - 0.25 0.25 0.25 0.25 - - - - - - - - - - - vpcmpeqd %xmm0, %xmm1, %xmm2 diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx2.s b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx2.s index 6dc5bac..6c8fac4 100644 --- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx2.s +++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx2.s @@ -560,14 +560,14 @@ vpxor (%rax), %ymm1, %ymm2 # CHECK-NEXT: 1 8 0.50 * vpcmpgtw (%rax), %ymm1, %ymm2 # CHECK-NEXT: 1 3 1.00 vperm2i128 $1, %ymm0, %ymm1, %ymm2 # CHECK-NEXT: 1 8 1.00 * vperm2i128 $1, (%rax), %ymm1, %ymm2 -# CHECK-NEXT: 2 5 1.00 vpermd %ymm0, %ymm1, %ymm2 -# CHECK-NEXT: 2 12 2.00 * vpermd (%rax), %ymm1, %ymm2 -# CHECK-NEXT: 2 6 1.00 vpermpd $1, %ymm0, %ymm2 -# CHECK-NEXT: 3 13 2.00 * vpermpd 
$1, (%rax), %ymm2 -# CHECK-NEXT: 2 7 1.00 vpermps %ymm0, %ymm1, %ymm2 -# CHECK-NEXT: 3 14 2.00 * vpermps (%rax), %ymm1, %ymm2 -# CHECK-NEXT: 2 6 1.00 vpermq $1, %ymm0, %ymm2 -# CHECK-NEXT: 2 12 2.00 * vpermq $1, (%rax), %ymm2 +# CHECK-NEXT: 1 4 1.00 vpermd %ymm0, %ymm1, %ymm2 +# CHECK-NEXT: 1 11 1.00 * vpermd (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 1 4 1.00 vpermpd $1, %ymm0, %ymm2 +# CHECK-NEXT: 1 11 1.00 * vpermpd $1, (%rax), %ymm2 +# CHECK-NEXT: 1 4 1.00 vpermps %ymm0, %ymm1, %ymm2 +# CHECK-NEXT: 1 11 1.00 * vpermps (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 1 4 1.00 vpermq $1, %ymm0, %ymm2 +# CHECK-NEXT: 1 11 1.00 * vpermq $1, (%rax), %ymm2 # CHECK-NEXT: 1 5 0.33 * vpgatherdd %xmm0, (%rax,%xmm1,2), %xmm2 # CHECK-NEXT: 1 5 0.33 * vpgatherdd %ymm0, (%rax,%ymm1,2), %ymm2 # CHECK-NEXT: 1 5 0.33 * vpgatherdq %xmm0, (%rax,%xmm1,2), %xmm2 @@ -789,7 +789,7 @@ vpxor (%rax), %ymm1, %ymm2 # CHECK: Resource pressure per iteration: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] -# CHECK-NEXT: 6.67 6.67 6.67 - - - - - 93.75 132.75 92.25 36.25 80.50 80.50 29.00 52.33 52.33 52.33 50.67 50.67 50.67 2.50 2.50 +# CHECK-NEXT: 6.67 6.67 6.67 - - - - - 93.75 128.75 92.25 36.25 80.50 80.50 29.00 52.33 52.33 52.33 50.67 50.67 50.67 2.50 2.50 # CHECK: Resource pressure by instruction: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] Instructions: @@ -894,13 +894,13 @@ vpxor (%rax), %ymm1, %ymm2 # CHECK-NEXT: - - - - - - - - - 1.00 - - - - - - - - - - - - - vperm2i128 $1, %ymm0, %ymm1, %ymm2 # CHECK-NEXT: - - - - - - - - - 1.00 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - vperm2i128 $1, (%rax), %ymm1, %ymm2 # CHECK-NEXT: - - - - - - - - - 1.00 - - - - - - - - - - - - - vpermd %ymm0, %ymm1, %ymm2 -# CHECK-NEXT: 0.33 0.33 0.33 - - - - - - 2.00 - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpermd (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 0.33 0.33 0.33 - - - - - - 1.00 - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpermd (%rax), %ymm1, %ymm2 # CHECK-NEXT: - - - - - - - - - 1.00 - - - - - - - - - - - - - vpermpd $1, %ymm0, %ymm2 -# CHECK-NEXT: 0.33 0.33 0.33 - - - - - - 2.00 - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpermpd $1, (%rax), %ymm2 +# CHECK-NEXT: 0.33 0.33 0.33 - - - - - - 1.00 - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpermpd $1, (%rax), %ymm2 # CHECK-NEXT: - - - - - - - - - 1.00 - - - - - - - - - - - - - vpermps %ymm0, %ymm1, %ymm2 -# CHECK-NEXT: 0.33 0.33 0.33 - - - - - - 2.00 - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpermps (%rax), %ymm1, %ymm2 +# CHECK-NEXT: 0.33 0.33 0.33 - - - - - - 1.00 - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpermps (%rax), %ymm1, %ymm2 # CHECK-NEXT: - - - - - - - - - 1.00 - - - - - - - - - - - - - vpermq $1, %ymm0, %ymm2 -# CHECK-NEXT: 0.33 0.33 0.33 - - - - - - 2.00 - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpermq $1, (%rax), %ymm2 +# CHECK-NEXT: 0.33 0.33 0.33 - - - - - - 1.00 - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpermq $1, (%rax), %ymm2 # CHECK-NEXT: 0.33 0.33 0.33 - - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpgatherdd %xmm0, (%rax,%xmm1,2), %xmm2 # CHECK-NEXT: 0.33 0.33 0.33 - - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpgatherdd %ymm0, (%rax,%ymm1,2), %ymm2 # CHECK-NEXT: 0.33 0.33 0.33 - - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpgatherdq %xmm0, (%rax,%xmm1,2), %xmm2 diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512.s 
b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512.s index 72d7de3..14b8e5f 100644 --- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512.s +++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512.s @@ -1207,7 +1207,7 @@ vunpcklps (%rax){1to16}, %zmm17, %zmm19 {z}{k1} # CHECK-NEXT: 1 3 1.00 vaddps %zmm16, %zmm17, %zmm19 {%k1} {z} # CHECK-NEXT: 1 10 1.00 * vaddps (%rax), %zmm17, %zmm19 {%k1} {z} # CHECK-NEXT: 1 10 1.00 * vaddps (%rax){1to16}, %zmm17, %zmm19 {%k1} {z} -# CHECK-NEXT: 1 4 0.50 valignd $1, %zmm16, %zmm17, %zmm19 +# CHECK-NEXT: 1 2 0.50 valignd $1, %zmm16, %zmm17, %zmm19 # CHECK-NEXT: 1 8 1.00 * valignd $1, (%rax), %zmm17, %zmm19 # CHECK-NEXT: 1 8 1.00 * valignd $1, (%rax){1to16}, %zmm17, %zmm19 # CHECK-NEXT: 1 1 1.00 valignd $1, %zmm16, %zmm17, %zmm19 {%k1} @@ -1216,7 +1216,7 @@ vunpcklps (%rax){1to16}, %zmm17, %zmm19 {z}{k1} # CHECK-NEXT: 1 1 1.00 valignd $1, %zmm16, %zmm17, %zmm19 {%k1} {z} # CHECK-NEXT: 1 8 1.00 * valignd $1, (%rax), %zmm17, %zmm19 {%k1} {z} # CHECK-NEXT: 1 8 1.00 * valignd $1, (%rax){1to16}, %zmm17, %zmm19 {%k1} {z} -# CHECK-NEXT: 1 4 0.50 valignq $1, %zmm16, %zmm17, %zmm19 +# CHECK-NEXT: 1 2 0.50 valignq $1, %zmm16, %zmm17, %zmm19 # CHECK-NEXT: 1 8 1.00 * valignq $1, (%rax), %zmm17, %zmm19 # CHECK-NEXT: 1 8 1.00 * valignq $1, (%rax){1to8}, %zmm17, %zmm19 # CHECK-NEXT: 1 1 1.00 valignq $1, %zmm16, %zmm17, %zmm19 {%k1} diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vl.s b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vl.s index 552b3e4..ead609e 100644 --- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vl.s +++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vl.s @@ -1948,7 +1948,7 @@ vunpcklps (%rax){1to8}, %ymm17, %ymm19 {z}{k1} # CHECK-NEXT: 1 3 0.50 vaddps %ymm16, %ymm17, %ymm19 {%k1} {z} # CHECK-NEXT: 1 10 0.50 * vaddps (%rax), %ymm17, %ymm19 {%k1} {z} # CHECK-NEXT: 1 10 0.50 * vaddps (%rax){1to8}, %ymm17, %ymm19 {%k1} {z} -# CHECK-NEXT: 1 4 0.50 valignd $1, %xmm16, %xmm17, %xmm19 +# CHECK-NEXT: 1 3 0.50 valignd $1, %xmm16, %xmm17, %xmm19 # CHECK-NEXT: 1 8 0.50 * valignd $1, (%rax), %xmm17, %xmm19 # CHECK-NEXT: 1 8 0.50 * valignd $1, (%rax){1to4}, %xmm17, %xmm19 # CHECK-NEXT: 1 1 0.50 valignd $1, %xmm16, %xmm17, %xmm19 {%k1} @@ -1957,7 +1957,7 @@ vunpcklps (%rax){1to8}, %ymm17, %ymm19 {z}{k1} # CHECK-NEXT: 1 1 0.50 valignd $1, %xmm16, %xmm17, %xmm19 {%k1} {z} # CHECK-NEXT: 1 8 0.50 * valignd $1, (%rax), %xmm17, %xmm19 {%k1} {z} # CHECK-NEXT: 1 8 0.50 * valignd $1, (%rax){1to4}, %xmm17, %xmm19 {%k1} {z} -# CHECK-NEXT: 1 4 0.50 valignd $1, %ymm16, %ymm17, %ymm19 +# CHECK-NEXT: 1 4 1.00 valignd $1, %ymm16, %ymm17, %ymm19 # CHECK-NEXT: 1 8 0.50 * valignd $1, (%rax), %ymm17, %ymm19 # CHECK-NEXT: 1 8 0.50 * valignd $1, (%rax){1to8}, %ymm17, %ymm19 # CHECK-NEXT: 1 1 0.50 valignd $1, %ymm16, %ymm17, %ymm19 {%k1} @@ -1966,7 +1966,7 @@ vunpcklps (%rax){1to8}, %ymm17, %ymm19 {z}{k1} # CHECK-NEXT: 1 1 0.50 valignd $1, %ymm16, %ymm17, %ymm19 {%k1} {z} # CHECK-NEXT: 1 8 0.50 * valignd $1, (%rax), %ymm17, %ymm19 {%k1} {z} # CHECK-NEXT: 1 8 0.50 * valignd $1, (%rax){1to8}, %ymm17, %ymm19 {%k1} {z} -# CHECK-NEXT: 1 4 0.50 valignq $1, %xmm16, %xmm17, %xmm19 +# CHECK-NEXT: 1 3 0.50 valignq $1, %xmm16, %xmm17, %xmm19 # CHECK-NEXT: 1 8 0.50 * valignq $1, (%rax), %xmm17, %xmm19 # CHECK-NEXT: 1 8 0.50 * valignq $1, (%rax){1to2}, %xmm17, %xmm19 # CHECK-NEXT: 1 1 0.50 valignq $1, %xmm16, %xmm17, %xmm19 {%k1} @@ -1975,7 +1975,7 @@ vunpcklps (%rax){1to8}, %ymm17, %ymm19 {z}{k1} # CHECK-NEXT: 1 1 0.50 valignq $1, %xmm16, %xmm17, %xmm19 
{%k1} {z} # CHECK-NEXT: 1 8 0.50 * valignq $1, (%rax), %xmm17, %xmm19 {%k1} {z} # CHECK-NEXT: 1 8 0.50 * valignq $1, (%rax){1to2}, %xmm17, %xmm19 {%k1} {z} -# CHECK-NEXT: 1 4 0.50 valignq $1, %ymm16, %ymm17, %ymm19 +# CHECK-NEXT: 1 4 1.00 valignq $1, %ymm16, %ymm17, %ymm19 # CHECK-NEXT: 1 8 0.50 * valignq $1, (%rax), %ymm17, %ymm19 # CHECK-NEXT: 1 8 0.50 * valignq $1, (%rax){1to4}, %ymm17, %ymm19 # CHECK-NEXT: 1 1 0.50 valignq $1, %ymm16, %ymm17, %ymm19 {%k1} @@ -3614,7 +3614,7 @@ vunpcklps (%rax){1to8}, %ymm17, %ymm19 {z}{k1} # CHECK: Resource pressure per iteration: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] -# CHECK-NEXT: 10.67 10.67 10.67 - - - - - 208.00 1083.00 636.50 261.50 509.50 509.50 32.00 355.67 355.67 355.67 334.33 334.33 334.33 32.00 32.00 +# CHECK-NEXT: 10.67 10.67 10.67 - - - - - 208.00 1084.00 637.50 261.50 509.50 509.50 32.00 355.67 355.67 355.67 334.33 334.33 334.33 32.00 32.00 # CHECK: Resource pressure by instruction: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] Instructions: @@ -3663,7 +3663,7 @@ vunpcklps (%rax){1to8}, %ymm17, %ymm19 {z}{k1} # CHECK-NEXT: - - - - - - - - - 0.50 0.50 - - - - - - - - - - - - valignd $1, %xmm16, %xmm17, %xmm19 {%k1} {z} # CHECK-NEXT: - - - - - - - - - 0.50 0.50 - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - valignd $1, (%rax), %xmm17, %xmm19 {%k1} {z} # CHECK-NEXT: - - - - - - - - - 0.50 0.50 - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - valignd $1, (%rax){1to4}, %xmm17, %xmm19 {%k1} {z} -# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - - - - - - - - - - - - valignd $1, %ymm16, %ymm17, %ymm19 +# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - - - - - - - - - - - - valignd $1, %ymm16, %ymm17, %ymm19 # CHECK-NEXT: - - - - - - - - - 0.50 0.50 - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - valignd $1, (%rax), %ymm17, %ymm19 # CHECK-NEXT: - - - - - - - - - 0.50 0.50 - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - valignd $1, (%rax){1to8}, %ymm17, %ymm19 # CHECK-NEXT: - - - - - - - - - 0.50 0.50 - - - - - - - - - - - - valignd $1, %ymm16, %ymm17, %ymm19 {%k1} @@ -3681,7 +3681,7 @@ vunpcklps (%rax){1to8}, %ymm17, %ymm19 {z}{k1} # CHECK-NEXT: - - - - - - - - - 0.50 0.50 - - - - - - - - - - - - valignq $1, %xmm16, %xmm17, %xmm19 {%k1} {z} # CHECK-NEXT: - - - - - - - - - 0.50 0.50 - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - valignq $1, (%rax), %xmm17, %xmm19 {%k1} {z} # CHECK-NEXT: - - - - - - - - - 0.50 0.50 - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - valignq $1, (%rax){1to2}, %xmm17, %xmm19 {%k1} {z} -# CHECK-NEXT: - - - - - - - - - 0.50 0.50 - - - - - - - - - - - - valignq $1, %ymm16, %ymm17, %ymm19 +# CHECK-NEXT: - - - - - - - - - 1.00 1.00 - - - - - - - - - - - - valignq $1, %ymm16, %ymm17, %ymm19 # CHECK-NEXT: - - - - - - - - - 0.50 0.50 - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - valignq $1, (%rax), %ymm17, %ymm19 # CHECK-NEXT: - - - - - - - - - 0.50 0.50 - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - valignq $1, (%rax){1to4}, %ymm17, %ymm19 # CHECK-NEXT: - - - - - - - - - 0.50 0.50 - - - - - - - - - - - - valignq $1, %ymm16, %ymm17, %ymm19 {%k1} diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vpclmulqdq.s b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vpclmulqdq.s index 87ba060..d1f2a98 100644 --- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vpclmulqdq.s +++ 
b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vpclmulqdq.s @@ -13,8 +13,8 @@ vpclmulqdq $11, (%rax), %zmm17, %zmm19 # CHECK-NEXT: [6]: HasSideEffects (U) # CHECK: [1] [2] [3] [4] [5] [6] Instructions: -# CHECK-NEXT: 4 4 2.00 vpclmulqdq $11, %zmm16, %zmm17, %zmm19 -# CHECK-NEXT: 4 11 2.00 * vpclmulqdq $11, (%rax), %zmm17, %zmm19 +# CHECK-NEXT: 4 4 1.50 vpclmulqdq $11, %zmm16, %zmm17, %zmm19 +# CHECK-NEXT: 4 11 1.50 * vpclmulqdq $11, (%rax), %zmm17, %zmm19 # CHECK: Resources: # CHECK-NEXT: [0] - Zn4AGU0 @@ -43,9 +43,9 @@ vpclmulqdq $11, (%rax), %zmm17, %zmm19 # CHECK: Resource pressure per iteration: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] -# CHECK-NEXT: - - - - - - - - 4.00 4.00 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - +# CHECK-NEXT: - - - - - - - - 3.00 3.00 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - # CHECK: Resource pressure by instruction: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] Instructions: -# CHECK-NEXT: - - - - - - - - 2.00 2.00 - - - - - - - - - - - - - vpclmulqdq $11, %zmm16, %zmm17, %zmm19 -# CHECK-NEXT: - - - - - - - - 2.00 2.00 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpclmulqdq $11, (%rax), %zmm17, %zmm19 +# CHECK-NEXT: - - - - - - - - 1.50 1.50 - - - - - - - - - - - - - vpclmulqdq $11, %zmm16, %zmm17, %zmm19 +# CHECK-NEXT: - - - - - - - - 1.50 1.50 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpclmulqdq $11, (%rax), %zmm17, %zmm19 diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vpclmulqdqvl.s b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vpclmulqdqvl.s index 3c80c56..ea7a280 100644 --- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vpclmulqdqvl.s +++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vpclmulqdqvl.s @@ -16,10 +16,10 @@ vpclmulqdq $11, (%rax), %ymm17, %ymm19 # CHECK-NEXT: [6]: HasSideEffects (U) # CHECK: [1] [2] [3] [4] [5] [6] Instructions: -# CHECK-NEXT: 4 4 2.00 vpclmulqdq $11, %xmm16, %xmm17, %xmm19 -# CHECK-NEXT: 4 11 2.00 * vpclmulqdq $11, (%rax), %xmm17, %xmm19 -# CHECK-NEXT: 4 4 2.00 vpclmulqdq $11, %ymm16, %ymm17, %ymm19 -# CHECK-NEXT: 4 11 2.00 * vpclmulqdq $11, (%rax), %ymm17, %ymm19 +# CHECK-NEXT: 4 4 1.50 vpclmulqdq $11, %xmm16, %xmm17, %xmm19 +# CHECK-NEXT: 4 11 1.50 * vpclmulqdq $11, (%rax), %xmm17, %xmm19 +# CHECK-NEXT: 4 4 1.50 vpclmulqdq $11, %ymm16, %ymm17, %ymm19 +# CHECK-NEXT: 4 11 1.50 * vpclmulqdq $11, (%rax), %ymm17, %ymm19 # CHECK: Resources: # CHECK-NEXT: [0] - Zn4AGU0 @@ -48,11 +48,11 @@ vpclmulqdq $11, (%rax), %ymm17, %ymm19 # CHECK: Resource pressure per iteration: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] -# CHECK-NEXT: - - - - - - - - 8.00 8.00 - - 1.00 1.00 - 0.67 0.67 0.67 0.67 0.67 0.67 - - +# CHECK-NEXT: - - - - - - - - 6.00 6.00 - - 1.00 1.00 - 0.67 0.67 0.67 0.67 0.67 0.67 - - # CHECK: Resource pressure by instruction: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] Instructions: -# CHECK-NEXT: - - - - - - - - 2.00 2.00 - - - - - - - - - - - - - vpclmulqdq $11, %xmm16, %xmm17, %xmm19 -# CHECK-NEXT: - - - - - - - - 2.00 2.00 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpclmulqdq $11, (%rax), %xmm17, %xmm19 -# CHECK-NEXT: - - - - - - - - 2.00 2.00 - - - - - - - - - - - - 
- vpclmulqdq $11, %ymm16, %ymm17, %ymm19 -# CHECK-NEXT: - - - - - - - - 2.00 2.00 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpclmulqdq $11, (%rax), %ymm17, %ymm19 +# CHECK-NEXT: - - - - - - - - 1.50 1.50 - - - - - - - - - - - - - vpclmulqdq $11, %xmm16, %xmm17, %xmm19 +# CHECK-NEXT: - - - - - - - - 1.50 1.50 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpclmulqdq $11, (%rax), %xmm17, %xmm19 +# CHECK-NEXT: - - - - - - - - 1.50 1.50 - - - - - - - - - - - - - vpclmulqdq $11, %ymm16, %ymm17, %ymm19 +# CHECK-NEXT: - - - - - - - - 1.50 1.50 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpclmulqdq $11, (%rax), %ymm17, %ymm19 diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-bmi1.s b/llvm/test/tools/llvm-mca/X86/Znver4/resources-bmi1.s index f4888cf..afbd566 100644 --- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-bmi1.s +++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-bmi1.s @@ -69,12 +69,12 @@ tzcnt (%rax), %rcx # CHECK-NEXT: 2 5 0.33 * blsrl (%rax), %ecx # CHECK-NEXT: 1 1 0.25 blsrq %rax, %rcx # CHECK-NEXT: 2 5 0.33 * blsrq (%rax), %rcx -# CHECK-NEXT: 2 2 1.00 tzcntw %ax, %cx -# CHECK-NEXT: 2 6 0.50 * tzcntw (%rax), %cx -# CHECK-NEXT: 2 2 0.50 tzcntl %eax, %ecx -# CHECK-NEXT: 2 6 0.50 * tzcntl (%rax), %ecx -# CHECK-NEXT: 2 2 0.50 tzcntq %rax, %rcx -# CHECK-NEXT: 2 6 0.50 * tzcntq (%rax), %rcx +# CHECK-NEXT: 1 1 0.25 tzcntw %ax, %cx +# CHECK-NEXT: 1 5 0.50 * tzcntw (%rax), %cx +# CHECK-NEXT: 1 1 0.50 tzcntl %eax, %ecx +# CHECK-NEXT: 1 5 0.50 * tzcntl (%rax), %ecx +# CHECK-NEXT: 1 1 0.50 tzcntq %rax, %rcx +# CHECK-NEXT: 1 5 0.50 * tzcntq (%rax), %rcx # CHECK: Resources: # CHECK-NEXT: [0] - Zn4AGU0 @@ -103,7 +103,7 @@ tzcnt (%rax), %rcx # CHECK: Resource pressure per iteration: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] -# CHECK-NEXT: 4.33 4.33 4.33 5.00 9.50 9.50 5.00 - - - - - - - - 4.33 4.33 4.33 4.33 4.33 4.33 - - +# CHECK-NEXT: 4.33 4.33 4.33 4.25 8.75 8.75 4.25 - - - - - - - - 4.33 4.33 4.33 4.33 4.33 4.33 - - # CHECK: Resource pressure by instruction: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] Instructions: @@ -127,7 +127,7 @@ tzcnt (%rax), %rcx # CHECK-NEXT: 0.33 0.33 0.33 0.25 0.25 0.25 0.25 - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - blsrl (%rax), %ecx # CHECK-NEXT: - - - 0.25 0.25 0.25 0.25 - - - - - - - - - - - - - - - - blsrq %rax, %rcx # CHECK-NEXT: 0.33 0.33 0.33 0.25 0.25 0.25 0.25 - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - blsrq (%rax), %rcx -# CHECK-NEXT: - - - 1.00 1.00 1.00 1.00 - - - - - - - - - - - - - - - - tzcntw %ax, %cx +# CHECK-NEXT: - - - 0.25 0.25 0.25 0.25 - - - - - - - - - - - - - - - - tzcntw %ax, %cx # CHECK-NEXT: 0.33 0.33 0.33 - 0.50 0.50 - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - tzcntw (%rax), %cx # CHECK-NEXT: - - - - 0.50 0.50 - - - - - - - - - - - - - - - - - tzcntl %eax, %ecx # CHECK-NEXT: 0.33 0.33 0.33 - 0.50 0.50 - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - tzcntl (%rax), %ecx diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-cmpxchg.s b/llvm/test/tools/llvm-mca/X86/Znver4/resources-cmpxchg.s index 64feeaf..26a42fd 100644 --- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-cmpxchg.s +++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-cmpxchg.s @@ -15,10 +15,10 @@ lock cmpxchg16b (%rax) # CHECK-NEXT: [6]: HasSideEffects (U) # CHECK: [1] [2] [3] [4] [5] [6] Instructions: -# CHECK-NEXT: 19 3 6.00 * * 
cmpxchg8b (%rax) -# CHECK-NEXT: 28 4 14.75 * * cmpxchg16b (%rax) -# CHECK-NEXT: 19 3 6.00 * * lock cmpxchg8b (%rax) -# CHECK-NEXT: 28 4 14.75 * * lock cmpxchg16b (%rax) +# CHECK-NEXT: 15 3 5.00 * * cmpxchg8b (%rax) +# CHECK-NEXT: 26 2 10.00 * * cmpxchg16b (%rax) +# CHECK-NEXT: 15 3 5.00 * * lock cmpxchg8b (%rax) +# CHECK-NEXT: 26 2 10.00 * * lock cmpxchg16b (%rax) # CHECK: Resources: # CHECK-NEXT: [0] - Zn4AGU0 @@ -47,11 +47,11 @@ lock cmpxchg16b (%rax) # CHECK: Resource pressure per iteration: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] -# CHECK-NEXT: - - - 41.50 41.50 41.50 41.50 - - - - - - - - - - - - - - - - +# CHECK-NEXT: - - - 30.00 30.00 30.00 30.00 - - - - - - - - - - - - - - - - # CHECK: Resource pressure by instruction: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] Instructions: -# CHECK-NEXT: - - - 6.00 6.00 6.00 6.00 - - - - - - - - - - - - - - - - cmpxchg8b (%rax) -# CHECK-NEXT: - - - 14.75 14.75 14.75 14.75 - - - - - - - - - - - - - - - - cmpxchg16b (%rax) -# CHECK-NEXT: - - - 6.00 6.00 6.00 6.00 - - - - - - - - - - - - - - - - lock cmpxchg8b (%rax) -# CHECK-NEXT: - - - 14.75 14.75 14.75 14.75 - - - - - - - - - - - - - - - - lock cmpxchg16b (%rax) +# CHECK-NEXT: - - - 5.00 5.00 5.00 5.00 - - - - - - - - - - - - - - - - cmpxchg8b (%rax) +# CHECK-NEXT: - - - 10.00 10.00 10.00 10.00 - - - - - - - - - - - - - - - - cmpxchg16b (%rax) +# CHECK-NEXT: - - - 5.00 5.00 5.00 5.00 - - - - - - - - - - - - - - - - lock cmpxchg8b (%rax) +# CHECK-NEXT: - - - 10.00 10.00 10.00 10.00 - - - - - - - - - - - - - - - - lock cmpxchg16b (%rax) diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-pclmul.s b/llvm/test/tools/llvm-mca/X86/Znver4/resources-pclmul.s index a36fb2aa..fc2bc8e 100644 --- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-pclmul.s +++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-pclmul.s @@ -13,8 +13,8 @@ pclmulqdq $11, (%rax), %xmm2 # CHECK-NEXT: [6]: HasSideEffects (U) # CHECK: [1] [2] [3] [4] [5] [6] Instructions: -# CHECK-NEXT: 4 4 2.00 pclmulqdq $11, %xmm0, %xmm2 -# CHECK-NEXT: 4 11 2.00 * pclmulqdq $11, (%rax), %xmm2 +# CHECK-NEXT: 4 4 1.50 pclmulqdq $11, %xmm0, %xmm2 +# CHECK-NEXT: 4 11 1.50 * pclmulqdq $11, (%rax), %xmm2 # CHECK: Resources: # CHECK-NEXT: [0] - Zn4AGU0 @@ -43,9 +43,9 @@ pclmulqdq $11, (%rax), %xmm2 # CHECK: Resource pressure per iteration: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] -# CHECK-NEXT: - - - - - - - - 4.00 4.00 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - +# CHECK-NEXT: - - - - - - - - 3.00 3.00 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - # CHECK: Resource pressure by instruction: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] Instructions: -# CHECK-NEXT: - - - - - - - - 2.00 2.00 - - - - - - - - - - - - - pclmulqdq $11, %xmm0, %xmm2 -# CHECK-NEXT: - - - - - - - - 2.00 2.00 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - pclmulqdq $11, (%rax), %xmm2 +# CHECK-NEXT: - - - - - - - - 1.50 1.50 - - - - - - - - - - - - - pclmulqdq $11, %xmm0, %xmm2 +# CHECK-NEXT: - - - - - - - - 1.50 1.50 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - pclmulqdq $11, (%rax), %xmm2 diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-sse42.s 
b/llvm/test/tools/llvm-mca/X86/Znver4/resources-sse42.s index 015d37e..ae60835 100644 --- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-sse42.s +++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-sse42.s @@ -52,12 +52,12 @@ pcmpgtq (%rax), %xmm2 # CHECK-NEXT: 1 7 1.00 * crc32q (%rax), %rcx # CHECK-NEXT: 8 6 3.00 pcmpestri $1, %xmm0, %xmm2 # CHECK-NEXT: 12 13 3.00 * pcmpestri $1, (%rax), %xmm2 -# CHECK-NEXT: 7 6 3.00 pcmpestrm $1, %xmm0, %xmm2 -# CHECK-NEXT: 12 13 3.00 * pcmpestrm $1, (%rax), %xmm2 +# CHECK-NEXT: 7 7 3.00 pcmpestrm $1, %xmm0, %xmm2 +# CHECK-NEXT: 12 14 3.00 * pcmpestrm $1, (%rax), %xmm2 # CHECK-NEXT: 4 2 2.00 pcmpistri $1, %xmm0, %xmm2 # CHECK-NEXT: 4 9 2.00 * pcmpistri $1, (%rax), %xmm2 -# CHECK-NEXT: 3 6 2.00 pcmpistrm $1, %xmm0, %xmm2 -# CHECK-NEXT: 4 13 2.00 * pcmpistrm $1, (%rax), %xmm2 +# CHECK-NEXT: 3 7 2.00 pcmpistrm $1, %xmm0, %xmm2 +# CHECK-NEXT: 4 14 2.00 * pcmpistrm $1, (%rax), %xmm2 # CHECK-NEXT: 1 1 0.25 pcmpgtq %xmm0, %xmm2 # CHECK-NEXT: 1 8 0.50 * pcmpgtq (%rax), %xmm2 diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-vpclmulqdq.s b/llvm/test/tools/llvm-mca/X86/Znver4/resources-vpclmulqdq.s index 55a36d0..dca4703 100644 --- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-vpclmulqdq.s +++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-vpclmulqdq.s @@ -13,8 +13,8 @@ vpclmulqdq $11, (%rax), %ymm1, %ymm3 # CHECK-NEXT: [6]: HasSideEffects (U) # CHECK: [1] [2] [3] [4] [5] [6] Instructions: -# CHECK-NEXT: 4 4 2.00 vpclmulqdq $11, %ymm0, %ymm1, %ymm3 -# CHECK-NEXT: 4 11 2.00 * vpclmulqdq $11, (%rax), %ymm1, %ymm3 +# CHECK-NEXT: 4 4 1.50 vpclmulqdq $11, %ymm0, %ymm1, %ymm3 +# CHECK-NEXT: 4 11 1.50 * vpclmulqdq $11, (%rax), %ymm1, %ymm3 # CHECK: Resources: # CHECK-NEXT: [0] - Zn4AGU0 @@ -43,9 +43,9 @@ vpclmulqdq $11, (%rax), %ymm1, %ymm3 # CHECK: Resource pressure per iteration: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] -# CHECK-NEXT: - - - - - - - - 4.00 4.00 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - +# CHECK-NEXT: - - - - - - - - 3.00 3.00 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - # CHECK: Resource pressure by instruction: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] Instructions: -# CHECK-NEXT: - - - - - - - - 2.00 2.00 - - - - - - - - - - - - - vpclmulqdq $11, %ymm0, %ymm1, %ymm3 -# CHECK-NEXT: - - - - - - - - 2.00 2.00 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpclmulqdq $11, (%rax), %ymm1, %ymm3 +# CHECK-NEXT: - - - - - - - - 1.50 1.50 - - - - - - - - - - - - - vpclmulqdq $11, %ymm0, %ymm1, %ymm3 +# CHECK-NEXT: - - - - - - - - 1.50 1.50 - - 0.50 0.50 - 0.33 0.33 0.33 0.33 0.33 0.33 - - vpclmulqdq $11, (%rax), %ymm1, %ymm3 diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-x86_64.s b/llvm/test/tools/llvm-mca/X86/Znver4/resources-x86_64.s index 9c5b4e4..886d9c6 100644 --- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-x86_64.s +++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-x86_64.s @@ -1173,18 +1173,18 @@ xorq (%rax), %rdi # CHECK-NEXT: 1 6 0.67 * * andq %rsi, (%rax) # CHECK-NEXT: 1 6 0.67 * * lock andq %rsi, (%rax) # CHECK-NEXT: 1 5 0.33 * andq (%rax), %rdi -# CHECK-NEXT: 6 1 1.00 bsfw %si, %di -# CHECK-NEXT: 6 1 1.00 bsrw %si, %di -# CHECK-NEXT: 7 5 1.00 * bsfw (%rax), %di -# CHECK-NEXT: 7 5 1.00 * bsrw (%rax), %di -# CHECK-NEXT: 6 1 1.00 bsfl %esi, %edi -# CHECK-NEXT: 6 1 1.00 bsrl %esi, %edi -# CHECK-NEXT: 7 5 1.00 * bsfl 
(%rax), %edi -# CHECK-NEXT: 7 5 1.00 * bsrl (%rax), %edi -# CHECK-NEXT: 6 1 1.00 bsfq %rsi, %rdi -# CHECK-NEXT: 6 1 1.00 bsrq %rsi, %rdi -# CHECK-NEXT: 7 5 1.00 * bsfq (%rax), %rdi -# CHECK-NEXT: 7 5 1.00 * bsrq (%rax), %rdi +# CHECK-NEXT: 1 1 1.00 bsfw %si, %di +# CHECK-NEXT: 1 1 1.00 bsrw %si, %di +# CHECK-NEXT: 2 5 1.00 * bsfw (%rax), %di +# CHECK-NEXT: 2 5 1.00 * bsrw (%rax), %di +# CHECK-NEXT: 1 1 1.00 bsfl %esi, %edi +# CHECK-NEXT: 1 1 1.00 bsrl %esi, %edi +# CHECK-NEXT: 2 5 1.00 * bsfl (%rax), %edi +# CHECK-NEXT: 2 5 1.00 * bsrl (%rax), %edi +# CHECK-NEXT: 1 1 1.00 bsfq %rsi, %rdi +# CHECK-NEXT: 1 1 1.00 bsrq %rsi, %rdi +# CHECK-NEXT: 2 5 1.00 * bsfq (%rax), %rdi +# CHECK-NEXT: 2 5 1.00 * bsrq (%rax), %rdi # CHECK-NEXT: 1 1 0.25 bswapl %eax # CHECK-NEXT: 1 1 0.25 bswapq %rax # CHECK-NEXT: 1 1 0.50 btw %si, %di @@ -1321,23 +1321,23 @@ xorq (%rax), %rdi # CHECK-NEXT: 1 1 0.25 decq %rdi # CHECK-NEXT: 1 6 0.67 * * decq (%rax) # CHECK-NEXT: 1 6 0.67 * * lock decq (%rax) -# CHECK-NEXT: 2 10 10.00 U divb %dil -# CHECK-NEXT: 2 14 10.00 * U divb (%rax) -# CHECK-NEXT: 2 11 11.00 U divw %si -# CHECK-NEXT: 2 15 11.00 * U divw (%rax) -# CHECK-NEXT: 2 13 13.00 U divl %edx -# CHECK-NEXT: 2 17 13.00 * U divl (%rax) -# CHECK-NEXT: 2 17 17.00 U divq %rcx -# CHECK-NEXT: 2 21 17.00 * U divq (%rax) +# CHECK-NEXT: 2 9 9.00 U divb %dil +# CHECK-NEXT: 2 13 9.00 * U divb (%rax) +# CHECK-NEXT: 2 10 10.00 U divw %si +# CHECK-NEXT: 2 14 10.00 * U divw (%rax) +# CHECK-NEXT: 2 12 12.00 U divl %edx +# CHECK-NEXT: 2 16 12.00 * U divl (%rax) +# CHECK-NEXT: 2 18 18.00 U divq %rcx +# CHECK-NEXT: 2 22 18.00 * U divq (%rax) # CHECK-NEXT: 100 100 25.00 U enter $7, $4095 -# CHECK-NEXT: 2 10 10.00 U idivb %dil -# CHECK-NEXT: 2 14 10.00 * U idivb (%rax) -# CHECK-NEXT: 2 11 11.00 U idivw %si -# CHECK-NEXT: 2 15 11.00 * U idivw (%rax) -# CHECK-NEXT: 2 13 13.00 U idivl %edx -# CHECK-NEXT: 2 17 13.00 * U idivl (%rax) -# CHECK-NEXT: 2 17 17.00 U idivq %rcx -# CHECK-NEXT: 2 21 17.00 * U idivq (%rax) +# CHECK-NEXT: 2 9 9.00 U idivb %dil +# CHECK-NEXT: 2 13 9.00 * U idivb (%rax) +# CHECK-NEXT: 2 10 10.00 U idivw %si +# CHECK-NEXT: 2 14 10.00 * U idivw (%rax) +# CHECK-NEXT: 2 12 12.00 U idivl %edx +# CHECK-NEXT: 2 16 12.00 * U idivl (%rax) +# CHECK-NEXT: 2 18 18.00 U idivq %rcx +# CHECK-NEXT: 2 22 18.00 * U idivq (%rax) # CHECK-NEXT: 1 3 3.00 imulb %dil # CHECK-NEXT: 1 7 3.00 * imulb (%rax) # CHECK-NEXT: 3 3 3.00 imulw %di @@ -1891,12 +1891,12 @@ xorq (%rax), %rdi # CHECK-NEXT: 1 5 0.67 * * xaddq %rax, (%rbx) # CHECK-NEXT: 1 5 0.67 * * lock xaddq %rax, (%rbx) # CHECK-NEXT: 2 1 0.50 xchgb %bl, %cl -# CHECK-NEXT: 5 7 0.50 * * xchgb %bl, (%rbx) -# CHECK-NEXT: 5 7 0.50 * * lock xchgb %bl, (%rbx) +# CHECK-NEXT: 2 7 0.50 * * xchgb %bl, (%rbx) +# CHECK-NEXT: 2 7 0.50 * * lock xchgb %bl, (%rbx) # CHECK-NEXT: 2 1 0.50 xchgw %bx, %ax # CHECK-NEXT: 2 1 0.50 xchgw %bx, %cx -# CHECK-NEXT: 5 7 0.50 * * xchgw %ax, (%rbx) -# CHECK-NEXT: 5 7 0.50 * * lock xchgw %ax, (%rbx) +# CHECK-NEXT: 2 7 0.50 * * xchgw %ax, (%rbx) +# CHECK-NEXT: 2 7 0.50 * * lock xchgw %ax, (%rbx) # CHECK-NEXT: 2 0 0.33 xchgl %ebx, %eax # CHECK-NEXT: 2 0 0.33 xchgl %ebx, %ecx # CHECK-NEXT: 2 6 0.50 * * xchgl %eax, (%rbx) @@ -1975,7 +1975,7 @@ xorq (%rax), %rdi # CHECK: Resource pressure per iteration: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] -# CHECK-NEXT: 259.00 259.00 259.00 1733.00 1865.50 1775.50 1529.50 1.50 - - - - - - - 259.00 259.00 259.00 151.67 151.67 151.67 161.00 161.00 
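A quick consistency note on the resources-x86_64.s totals at this point in the hunk: the [3] column of the per-iteration pressure line drops from 1733.00 (the old line above) to 1725.00 (the updated line below), and that 8.00-cycle change matches the divider updates further down. Each of the divb/divw/divl forms loses 1.00 cycle in both its register and memory variants, the two divq variants gain 1.00, and the idiv forms repeat the same pattern, so the net change is 2 * (6 * (-1.00) + 2 * (+1.00)) = -8.00. This is only an arithmetic cross-check of the numbers visible in the hunk, not additional scheduling data.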
+# CHECK-NEXT: 259.00 259.00 259.00 1725.00 1865.50 1775.50 1529.50 1.50 - - - - - - - 259.00 259.00 259.00 151.67 151.67 151.67 161.00 161.00 # CHECK: Resource pressure by instruction: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] Instructions: @@ -2266,23 +2266,23 @@ xorq (%rax), %rdi # CHECK-NEXT: - - - 0.25 0.25 0.25 0.25 - - - - - - - - - - - - - - - - decq %rdi # CHECK-NEXT: 0.67 0.67 0.67 0.25 0.25 0.25 0.25 - - - - - - - - 0.67 0.67 0.67 0.33 0.33 0.33 0.50 0.50 decq (%rax) # CHECK-NEXT: 0.67 0.67 0.67 0.25 0.25 0.25 0.25 - - - - - - - - 0.67 0.67 0.67 0.33 0.33 0.33 0.50 0.50 lock decq (%rax) -# CHECK-NEXT: - - - 10.00 - - - - - - - - - - - - - - - - - - - divb %dil -# CHECK-NEXT: 0.33 0.33 0.33 10.00 - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - divb (%rax) -# CHECK-NEXT: - - - 11.00 - - - - - - - - - - - - - - - - - - - divw %si -# CHECK-NEXT: 0.33 0.33 0.33 11.00 - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - divw (%rax) -# CHECK-NEXT: - - - 13.00 - - - - - - - - - - - - - - - - - - - divl %edx -# CHECK-NEXT: 0.33 0.33 0.33 13.00 - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - divl (%rax) -# CHECK-NEXT: - - - 17.00 - - - - - - - - - - - - - - - - - - - divq %rcx -# CHECK-NEXT: 0.33 0.33 0.33 17.00 - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - divq (%rax) +# CHECK-NEXT: - - - 9.00 - - - - - - - - - - - - - - - - - - - divb %dil +# CHECK-NEXT: 0.33 0.33 0.33 9.00 - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - divb (%rax) +# CHECK-NEXT: - - - 10.00 - - - - - - - - - - - - - - - - - - - divw %si +# CHECK-NEXT: 0.33 0.33 0.33 10.00 - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - divw (%rax) +# CHECK-NEXT: - - - 12.00 - - - - - - - - - - - - - - - - - - - divl %edx +# CHECK-NEXT: 0.33 0.33 0.33 12.00 - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - divl (%rax) +# CHECK-NEXT: - - - 18.00 - - - - - - - - - - - - - - - - - - - divq %rcx +# CHECK-NEXT: 0.33 0.33 0.33 18.00 - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - divq (%rax) # CHECK-NEXT: - - - 25.00 25.00 25.00 25.00 - - - - - - - - - - - - - - - - enter $7, $4095 -# CHECK-NEXT: - - - 10.00 - - - - - - - - - - - - - - - - - - - idivb %dil -# CHECK-NEXT: 0.33 0.33 0.33 10.00 - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - idivb (%rax) -# CHECK-NEXT: - - - 11.00 - - - - - - - - - - - - - - - - - - - idivw %si -# CHECK-NEXT: 0.33 0.33 0.33 11.00 - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - idivw (%rax) -# CHECK-NEXT: - - - 13.00 - - - - - - - - - - - - - - - - - - - idivl %edx -# CHECK-NEXT: 0.33 0.33 0.33 13.00 - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - idivl (%rax) -# CHECK-NEXT: - - - 17.00 - - - - - - - - - - - - - - - - - - - idivq %rcx -# CHECK-NEXT: 0.33 0.33 0.33 17.00 - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - idivq (%rax) +# CHECK-NEXT: - - - 9.00 - - - - - - - - - - - - - - - - - - - idivb %dil +# CHECK-NEXT: 0.33 0.33 0.33 9.00 - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - idivb (%rax) +# CHECK-NEXT: - - - 10.00 - - - - - - - - - - - - - - - - - - - idivw %si +# CHECK-NEXT: 0.33 0.33 0.33 10.00 - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - idivw (%rax) +# CHECK-NEXT: - - - 12.00 - - - - - - - - - - - - - - - - - - - idivl %edx +# CHECK-NEXT: 0.33 0.33 0.33 12.00 - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - idivl (%rax) +# CHECK-NEXT: - - - 18.00 - - - - - - - - - - - - - - - - - - - 
idivq %rcx +# CHECK-NEXT: 0.33 0.33 0.33 18.00 - - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - idivq (%rax) # CHECK-NEXT: - - - - 3.00 - - - - - - - - - - - - - - - - - - imulb %dil # CHECK-NEXT: 0.33 0.33 0.33 - 3.00 - - - - - - - - - - 0.33 0.33 0.33 0.33 0.33 0.33 - - imulb (%rax) # CHECK-NEXT: - - - - 3.00 - - - - - - - - - - - - - - - - - - imulw %di diff --git a/llvm/unittests/ADT/STLExtrasTest.cpp b/llvm/unittests/ADT/STLExtrasTest.cpp index 47469983..966b1f0 100644 --- a/llvm/unittests/ADT/STLExtrasTest.cpp +++ b/llvm/unittests/ADT/STLExtrasTest.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/StringRef.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -27,6 +28,7 @@ using namespace llvm; using testing::ElementsAre; +using testing::ElementsAreArray; using testing::UnorderedElementsAre; namespace { @@ -772,48 +774,30 @@ TEST(STLExtrasTest, DropBeginTest) { SmallVector<int, 5> vec{0, 1, 2, 3, 4}; for (int n = 0; n < 5; ++n) { - int i = n; - for (auto &v : drop_begin(vec, n)) { - EXPECT_EQ(v, i); - i += 1; - } - EXPECT_EQ(i, 5); + EXPECT_THAT(drop_begin(vec, n), + ElementsAreArray(ArrayRef(&vec[n], vec.size() - n))); } } TEST(STLExtrasTest, DropBeginDefaultTest) { SmallVector<int, 5> vec{0, 1, 2, 3, 4}; - int i = 1; - for (auto &v : drop_begin(vec)) { - EXPECT_EQ(v, i); - i += 1; - } - EXPECT_EQ(i, 5); + EXPECT_THAT(drop_begin(vec), ElementsAre(1, 2, 3, 4)); } TEST(STLExtrasTest, DropEndTest) { SmallVector<int, 5> vec{0, 1, 2, 3, 4}; for (int n = 0; n < 5; ++n) { - int i = 0; - for (auto &v : drop_end(vec, n)) { - EXPECT_EQ(v, i); - i += 1; - } - EXPECT_EQ(i, 5 - n); + EXPECT_THAT(drop_end(vec, n), + ElementsAreArray(ArrayRef(vec.data(), vec.size() - n))); } } TEST(STLExtrasTest, DropEndDefaultTest) { SmallVector<int, 5> vec{0, 1, 2, 3, 4}; - int i = 0; - for (auto &v : drop_end(vec)) { - EXPECT_EQ(v, i); - i += 1; - } - EXPECT_EQ(i, 4); + EXPECT_THAT(drop_end(vec), ElementsAre(0, 1, 2, 3)); } TEST(STLExtrasTest, MapRangeTest) { diff --git a/llvm/unittests/CAS/OnDiskCommonUtils.h b/llvm/unittests/CAS/OnDiskCommonUtils.h index 57c8c22..89f93e0 100644 --- a/llvm/unittests/CAS/OnDiskCommonUtils.h +++ b/llvm/unittests/CAS/OnDiskCommonUtils.h @@ -45,7 +45,7 @@ inline HashType digest(StringRef Data) { } inline ValueType valueFromString(StringRef S) { - ValueType Val; + ValueType Val = {}; llvm::copy(S.substr(0, sizeof(Val)), Val.data()); return Val; } diff --git a/llvm/unittests/CAS/OnDiskKeyValueDBTest.cpp b/llvm/unittests/CAS/OnDiskKeyValueDBTest.cpp index 19ea8f5..41512d0 100644 --- a/llvm/unittests/CAS/OnDiskKeyValueDBTest.cpp +++ b/llvm/unittests/CAS/OnDiskKeyValueDBTest.cpp @@ -65,7 +65,7 @@ TEST_F(OnDiskCASTest, OnDiskKeyValueDBTest) { // Insert a lot of entries. 
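The STLExtrasTest.cpp hunk above replaces hand-rolled, index-tracking verification loops with gmock range matchers, which keeps each expectation on one line and gives a more informative failure message. A minimal sketch of the same pattern, assuming only gtest/gmock and LLVM's ADT headers are available; the test name below is illustrative and not part of the patch:

```cpp
// Sketch only: mirrors the drop_begin/drop_end assertions in the hunk above.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"

using testing::ElementsAre;
using testing::ElementsAreArray;

TEST(RangeMatcherSketch, DropBeginDropEnd) {
  llvm::SmallVector<int, 5> Vec{0, 1, 2, 3, 4};

  // One assertion per slice instead of a manual loop with a running index;
  // on failure gmock prints the whole actual range next to the expected one.
  for (int N = 0; N < 5; ++N)
    EXPECT_THAT(llvm::drop_begin(Vec, N),
                ElementsAreArray(llvm::ArrayRef(&Vec[N], Vec.size() - N)));

  // Fixed expectations can be spelled out directly.
  EXPECT_THAT(llvm::drop_end(Vec), ElementsAre(0, 1, 2, 3));
}
```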
for (unsigned I = 0; I < 1024 * 100; ++I) { std::string Index = Twine(I).str(); - ArrayRef<char> Val; + std::optional<ArrayRef<char>> Val; ASSERT_THAT_ERROR( DB->put(digest(Index), valueFromString(Index)).moveInto(Val), Succeeded()); diff --git a/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp b/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp index 16b9979..aa56aaf 100644 --- a/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp +++ b/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp @@ -550,6 +550,31 @@ TEST_F(SelectionDAGPatternMatchTest, matchNode) { EXPECT_FALSE(sd_match(Add, m_Node(ISD::ADD, m_ConstInt(), m_Value()))); } +TEST_F(SelectionDAGPatternMatchTest, matchSelectLike) { + SDLoc DL; + auto Int32VT = EVT::getIntegerVT(Context, 32); + auto VInt32VT = EVT::getVectorVT(Context, Int32VT, 4); + + SDValue Cond = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 0, Int32VT); + SDValue TVal = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, Int32VT); + SDValue FVal = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 2, Int32VT); + + SDValue VCond = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 0, VInt32VT); + SDValue VTVal = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, VInt32VT); + SDValue VFVal = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 2, VInt32VT); + + SDValue Select = DAG->getNode(ISD::SELECT, DL, Int32VT, Cond, TVal, FVal); + SDValue VSelect = + DAG->getNode(ISD::VSELECT, DL, Int32VT, VCond, VTVal, VFVal); + + using namespace SDPatternMatch; + EXPECT_TRUE(sd_match(Select, m_SelectLike(m_Specific(Cond), m_Specific(TVal), + m_Specific(FVal)))); + EXPECT_TRUE( + sd_match(VSelect, m_SelectLike(m_Specific(VCond), m_Specific(VTVal), + m_Specific(VFVal)))); +} + namespace { struct VPMatchContext : public SDPatternMatch::BasicMatchContext { using SDPatternMatch::BasicMatchContext::BasicMatchContext; diff --git a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp index 5d69a31..bfc1275 100644 --- a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp +++ b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp @@ -730,6 +730,11 @@ TEST(ParseArchString, MissingDepency) { EXPECT_EQ(toString(RISCVISAInfo::parseArchString(Input, true).takeError()), ""); } + + EXPECT_EQ(toString(RISCVISAInfo::parseArchString("rv64i_xsfvfbfexp16e", true) + .takeError()), + "'xsfvfbfexp16e' requires 'zvfbfmin' or 'zvfbfa' extension to also " + "be specified"); } TEST(ParseArchString, RejectsUnrecognizedProfileNames) { @@ -1162,6 +1167,11 @@ R"(All available -march extensions for RISC-V xsfmm64t 0.6 xsfmmbase 0.6 xsfvcp 1.0 + xsfvfbfexp16e 0.5 + xsfvfexp16e 0.5 + xsfvfexp32e 0.5 + xsfvfexpa 0.2 + xsfvfexpa64e 0.2 xsfvfnrclipxfqf 1.0 xsfvfwmaccqqq 1.0 xsfvqmaccdod 1.0 diff --git a/llvm/utils/TableGen/InstrInfoEmitter.cpp b/llvm/utils/TableGen/InstrInfoEmitter.cpp index d1b14fb..0b90f91 100644 --- a/llvm/utils/TableGen/InstrInfoEmitter.cpp +++ b/llvm/utils/TableGen/InstrInfoEmitter.cpp @@ -285,7 +285,7 @@ emitGetNamedOperandIdx(raw_ostream &OS, static void emitGetOperandIdxName(raw_ostream &OS, - MapVector<StringRef, unsigned> OperandNameToID, + const MapVector<StringRef, unsigned> &OperandNameToID, const MapVector<SmallVector<int>, unsigned> &OperandMap, unsigned MaxNumOperands, unsigned NumOperandNames) { OS << "LLVM_READONLY OpName getOperandIdxName(uint16_t Opcode, int16_t Idx) " diff --git a/llvm/utils/gn/secondary/llvm/lib/Support/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Support/BUILD.gn index 38ba466..df9ddf9 100644 --- 
a/llvm/utils/gn/secondary/llvm/lib/Support/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/Support/BUILD.gn @@ -45,6 +45,7 @@ static_library("Support") { "ARMAttributeParser.cpp", "ARMBuildAttributes.cpp", "ARMWinEH.cpp", + "AllocToken.cpp", "Allocator.cpp", "AutoConvert.cpp", "BalancedPartitioning.cpp", diff --git a/llvm/utils/lit/lit/TestRunner.py b/llvm/utils/lit/lit/TestRunner.py index a7e2705..f883145 100644 --- a/llvm/utils/lit/lit/TestRunner.py +++ b/llvm/utils/lit/lit/TestRunner.py @@ -92,12 +92,12 @@ class ShellEnvironment(object): we maintain a dir stack for pushd/popd. """ - def __init__(self, cwd, env, umask=-1, ulimit={}): + def __init__(self, cwd, env, umask=-1, ulimit=None): self.cwd = cwd self.env = dict(env) self.umask = umask self.dirStack = [] - self.ulimit = ulimit + self.ulimit = ulimit if ulimit else {} def change_dir(self, newdir): if os.path.isabs(newdir): diff --git a/llvm/utils/lit/tests/Inputs/shtest-ulimit/ulimit_reset.txt b/llvm/utils/lit/tests/Inputs/shtest-ulimit/ulimit_reset.txt new file mode 100644 index 0000000..011d6db --- /dev/null +++ b/llvm/utils/lit/tests/Inputs/shtest-ulimit/ulimit_reset.txt @@ -0,0 +1,3 @@ +# RUN: %{python} %S/print_limits.py +# Fail the test so that we can assert on the output. +# RUN: not echo return diff --git a/llvm/utils/lit/tests/shtest-ulimit.py b/llvm/utils/lit/tests/shtest-ulimit.py index e843277..9a8febd 100644 --- a/llvm/utils/lit/tests/shtest-ulimit.py +++ b/llvm/utils/lit/tests/shtest-ulimit.py @@ -5,9 +5,9 @@ # as well. # UNSUPPORTED: system-windows, system-solaris -# RUN: not %{lit} -a -v %{inputs}/shtest-ulimit | FileCheck %s +# RUN: not %{lit} -a -v %{inputs}/shtest-ulimit --order=lexical | FileCheck %s -# CHECK: -- Testing: 2 tests{{.*}} +# CHECK: -- Testing: 3 tests{{.*}} # CHECK-LABEL: FAIL: shtest-ulimit :: ulimit-bad-arg.txt ({{[^)]*}}) # CHECK: ulimit -n @@ -16,3 +16,6 @@ # CHECK-LABEL: FAIL: shtest-ulimit :: ulimit_okay.txt ({{[^)]*}}) # CHECK: ulimit -n 50 # CHECK: RLIMIT_NOFILE=50 + +# CHECK-LABEL: FAIL: shtest-ulimit :: ulimit_reset.txt ({{[^)]*}}) +# CHECK-NOT: RLIMIT_NOFILE=50 diff --git a/llvm/utils/update_mc_test_checks.py b/llvm/utils/update_mc_test_checks.py index ab7fe19..67fff56 100755 --- a/llvm/utils/update_mc_test_checks.py +++ b/llvm/utils/update_mc_test_checks.py @@ -290,11 +290,9 @@ def update_test(ti: common.TestInfo): # prefix is selected and generated with most shared output lines # each run_id can only be used once - gen_prefix = "" used_runid = set() - # line number diff between generated prefix and testline - line_offset = 1 + selected_prefixes = set() for prefix, tup in p_dict_sorted.items(): o, run_ids = tup @@ -308,18 +306,24 @@ def update_test(ti: common.TestInfo): else: used_runid.add(i) if not skip: - used_prefixes.add(prefix) + selected_prefixes.add(prefix) - if hasErr(o): - newline = getErrCheckLine(prefix, o, mc_mode, line_offset) - else: - newline = getStdCheckLine(prefix, o, mc_mode) + # Generate check lines in alphabetical order. 
+ check_lines = [] + for prefix in sorted(selected_prefixes): + o, run_ids = p_dict[prefix] + used_prefixes.add(prefix) + + if hasErr(o): + line_offset = len(check_lines) + 1 + check = getErrCheckLine(prefix, o, mc_mode, line_offset) + else: + check = getStdCheckLine(prefix, o, mc_mode) - if newline: - gen_prefix += newline - line_offset += 1 + if check: + check_lines.append(check.strip()) - generated_prefixes[input_line] = gen_prefix.rstrip("\n") + generated_prefixes[input_line] = "\n".join(check_lines) # write output for input_info in ti.iterlines(output_lines): diff --git a/mlir/lib/Dialect/Linalg/Transforms/RuntimeOpVerification.cpp b/mlir/lib/Dialect/Linalg/Transforms/RuntimeOpVerification.cpp index 15eb51a..181b484 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/RuntimeOpVerification.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/RuntimeOpVerification.cpp @@ -17,6 +17,7 @@ #include "mlir/Dialect/Index/IR/IndexOps.h" #include "mlir/Dialect/Linalg/IR/Linalg.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/Interfaces/RuntimeVerifiableOpInterface.h" @@ -43,6 +44,32 @@ struct StructuredOpInterface auto zero = arith::ConstantIndexOp::create(builder, loc, 0); auto one = arith::ConstantIndexOp::create(builder, loc, 1); + Value iterationDomainIsNonDegenerate; + for (auto [start, end] : llvm::zip(starts, ends)) { + auto startValue = getValueOrCreateConstantIndexOp(builder, loc, start); + auto endValue = getValueOrCreateConstantIndexOp(builder, loc, end); + + // Loop Trip count > 0 iff start < end + Value dimensionHasNonZeroTripCount = builder.create<index::CmpOp>( + loc, index::IndexCmpPredicate::SLT, startValue, endValue); + + if (!iterationDomainIsNonDegenerate) { + iterationDomainIsNonDegenerate = dimensionHasNonZeroTripCount; + } else { + // Iteration domain is non-degenerate iff all dimensions have loop trip + // count > 0 + iterationDomainIsNonDegenerate = builder.create<arith::AndIOp>( + loc, iterationDomainIsNonDegenerate, dimensionHasNonZeroTripCount); + } + } + + if (!iterationDomainIsNonDegenerate) + return; + + auto ifOp = builder.create<scf::IfOp>(loc, iterationDomainIsNonDegenerate, + /*withElseRegion=*/false); + builder.setInsertionPointToStart(&ifOp.getThenRegion().front()); + // Subtract one from the loop ends before composing with the indexing map transform(ends, ends.begin(), [&](OpFoldResult end) { auto endValue = getValueOrCreateConstantIndexOp(builder, loc, end); @@ -110,6 +137,7 @@ struct StructuredOpInterface builder.createOrFold<cf::AssertOp>(loc, cmpOp, msg); } } + builder.setInsertionPointAfter(ifOp); } }; diff --git a/mlir/python/CMakeLists.txt b/mlir/python/CMakeLists.txt index 84ff08a..e79b51a 100644 --- a/mlir/python/CMakeLists.txt +++ b/mlir/python/CMakeLists.txt @@ -349,6 +349,15 @@ declare_mlir_dialect_python_bindings( declare_mlir_dialect_python_bindings( ADD_TO_PARENT MLIRPythonSources.Dialects ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/mlir" + TD_FILE dialects/ShardOps.td + SOURCES + dialects/shard.py + DIALECT_NAME shard + GEN_ENUM_BINDINGS) + +declare_mlir_dialect_python_bindings( + ADD_TO_PARENT MLIRPythonSources.Dialects + ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/mlir" TD_FILE dialects/MLProgramOps.td SOURCES dialects/ml_program.py diff --git a/mlir/python/mlir/dialects/ShardOps.td b/mlir/python/mlir/dialects/ShardOps.td new file mode 100644 index 0000000..f852766 --- /dev/null +++ b/mlir/python/mlir/dialects/ShardOps.td @@ -0,0 +1,14 @@ +//===-- ShardOps.td - Entry point 
for ShardOps bindings ---------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef PYTHON_BINDINGS_SHARD_OPS +#define PYTHON_BINDINGS_SHARD_OPS + +include "mlir/Dialect/Shard/IR/ShardOps.td" + +#endif diff --git a/mlir/python/mlir/dialects/shard.py b/mlir/python/mlir/dialects/shard.py new file mode 100644 index 0000000..8d69f17 --- /dev/null +++ b/mlir/python/mlir/dialects/shard.py @@ -0,0 +1,6 @@ +# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +from ._shard_ops_gen import * +from ._shard_enum_gen import * diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/runtime-verification.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/runtime-verification.mlir index 9f4393e..127ab70 100644 --- a/mlir/test/Integration/Dialect/Linalg/CPU/runtime-verification.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/runtime-verification.mlir @@ -103,6 +103,17 @@ func.func @main() { // CHECK: unexpected negative result on dimension #0 of input/output operand #0 func.call @reverse_from_3(%d5x) : (tensor<?xf32>) -> (tensor<?xf32>) + %c0x = arith.constant dense<1.0> : tensor<0xf32> + %d0x = tensor.cast %c0x : tensor<0xf32> to tensor<?xf32> + // CHECK-NOT: ERROR: Runtime op verification failed + func.call @fill_empty_1d(%d0x) : (tensor<?xf32>) -> (tensor<?xf32>) + + %c0x5 = arith.constant dense<0.0> : tensor<0x5xf32> + %d0x5 = tensor.cast %c0x5 : tensor<0x5xf32> to tensor<?x?xf32> + + // CHECK-NOT: ERROR: Runtime op verification failed + func.call @fill_empty_2d(%d0x5) : (tensor<?x?xf32>) -> (tensor<?x?xf32>) + return } @@ -297,3 +308,15 @@ func.func @reverse_from_3(%arg0: tensor<?xf32>) -> (tensor<?xf32>) { } -> tensor<?xf32> return %result : tensor<?xf32> } + +func.func @fill_empty_1d(%arg0: tensor<?xf32>) -> (tensor<?xf32>) { + %c0 = arith.constant 0.0 : f32 + %0 = linalg.fill ins(%c0 : f32) outs(%arg0 : tensor<?xf32>) -> tensor<?xf32> + return %0 : tensor<?xf32> +} + +func.func @fill_empty_2d(%arg0: tensor<?x?xf32>) -> (tensor<?x?xf32>) { + %c0 = arith.constant 0.0 : f32 + %0 = linalg.fill ins(%c0 : f32) outs(%arg0 : tensor<?x?xf32>) -> tensor<?x?xf32> + return %0 : tensor<?x?xf32> +} diff --git a/mlir/test/Interfaces/TilingInterface/tile-and-fuse-using-interface.mlir b/mlir/test/Interfaces/TilingInterface/tile-and-fuse-using-interface.mlir index 8116044..21d7816 100644 --- a/mlir/test/Interfaces/TilingInterface/tile-and-fuse-using-interface.mlir +++ b/mlir/test/Interfaces/TilingInterface/tile-and-fuse-using-interface.mlir @@ -658,33 +658,20 @@ module attributes {transform.with_named_sequence} { } } -// CHECK: func.func private @tile_one_consumer_using_tile_and_fuse(%[[VAL_0:.*]]: tensor<16x128x48x96xf32>, %[[VAL_1:.*]]: tensor<16x96x48x128xf32>) -> tensor<16x96x48x128xf32> { -// CHECK: %[[VAL_2:.*]] = arith.constant 0 : index -// CHECK: %[[VAL_3:.*]] = arith.constant 16 : index -// CHECK: %[[VAL_4:.*]] = arith.constant 128 : index -// CHECK: %[[VAL_5:.*]] = arith.constant 48 : index -// CHECK: %[[VAL_6:.*]] = arith.constant 96 : index -// CHECK: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_8:.*]] = scf.for %[[VAL_9:.*]] = %[[VAL_2]] to %[[VAL_3]] step %[[VAL_7]] iter_args(%[[VAL_10:.*]] 
= %[[VAL_1]]) -> (tensor<16x96x48x128xf32>) { -// CHECK: %[[VAL_11:.*]] = scf.for %[[VAL_12:.*]] = %[[VAL_2]] to %[[VAL_4]] step %[[VAL_3]] iter_args(%[[VAL_13:.*]] = %[[VAL_10]]) -> (tensor<16x96x48x128xf32>) { -// CHECK: %[[VAL_14:.*]] = scf.for %[[VAL_15:.*]] = %[[VAL_2]] to %[[VAL_5]] step %[[VAL_3]] iter_args(%[[VAL_16:.*]] = %[[VAL_13]]) -> (tensor<16x96x48x128xf32>) { -// CHECK: %[[VAL_17:.*]] = scf.for %[[VAL_18:.*]] = %[[VAL_2]] to %[[VAL_6]] step %[[VAL_3]] iter_args(%[[VAL_19:.*]] = %[[VAL_16]]) -> (tensor<16x96x48x128xf32>) { -// CHECK: %[[VAL_20:.*]] = tensor.extract_slice %[[VAL_0]]{{\[}}%[[VAL_9]], %[[VAL_12]], %[[VAL_15]], %[[VAL_18]]] [1, 16, 16, 16] [1, 1, 1, 1] : tensor<16x128x48x96xf32> to tensor<1x16x16x16xf32> -// CHECK: %[[VAL_21:.*]] = tensor.extract_slice %[[VAL_19]]{{\[}}%[[VAL_9]], %[[VAL_18]], %[[VAL_15]], %[[VAL_12]]] [1, 16, 16, 16] [1, 1, 1, 1] : tensor<16x96x48x128xf32> to tensor<1x16x16x16xf32> -// CHECK: %[[VAL_22:.*]] = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[VAL_20]] : tensor<1x16x16x16xf32>) outs(%[[VAL_21]] : tensor<1x16x16x16xf32>) { -// CHECK: ^bb0(%[[VAL_23:.*]]: f32, %[[VAL_24:.*]]: f32): -// CHECK: linalg.yield %[[VAL_23]] : f32 -// CHECK: } -> tensor<1x16x16x16xf32> -// CHECK: %[[VAL_25:.*]] = tensor.insert_slice %[[VAL_26:.*]] into %[[VAL_19]]{{\[}}%[[VAL_9]], %[[VAL_18]], %[[VAL_15]], %[[VAL_12]]] [1, 16, 16, 16] [1, 1, 1, 1] : tensor<1x16x16x16xf32> into tensor<16x96x48x128xf32> -// CHECK: scf.yield %[[VAL_25]] : tensor<16x96x48x128xf32> -// CHECK: } -// CHECK: scf.yield %[[VAL_27:.*]] : tensor<16x96x48x128xf32> -// CHECK: } -// CHECK: scf.yield %[[VAL_28:.*]] : tensor<16x96x48x128xf32> -// CHECK: } -// CHECK: scf.yield %[[VAL_29:.*]] : tensor<16x96x48x128xf32> -// CHECK: } -// CHECK: return %[[VAL_30:.*]] : tensor<16x96x48x128xf32> -// CHECK: } -// CHECK: } - +// CHECK-LABEL: func private @tile_one_consumer_using_tile_and_fuse +// CHECK-SAME: %[[ARG0:.*]]: tensor<16x128x48x96xf32> +// CHECK-SAME: %[[ARG1:.*]]: tensor<16x96x48x128xf32> +// CHECK: scf.for %[[IV0:[a-zA-Z0-9]+]] = +// CHECK-SAME: iter_args(%[[ITERARG0:.+]] = %[[ARG1]]) +// CHECK: scf.for %[[IV1:[a-zA-Z0-9]+]] = +// CHECK-SAME: iter_args(%[[ITERARG1:.+]] = %[[ITERARG0]]) +// CHECK: scf.for %[[IV2:[a-zA-Z0-9]+]] = +// CHECK-SAME: iter_args(%[[ITERARG2:.+]] = %[[ITERARG1]]) +// CHECK: scf.for %[[IV3:[a-zA-Z0-9]+]] = +// CHECK-SAME: iter_args(%[[ITERARG3:.+]] = %[[ITERARG2]]) +// CHECK: %[[TILEDARG0:.*]] = tensor.extract_slice %[[ARG0]]{{\[}}%[[IV0]], %[[IV1]], %[[IV2]], %[[IV3]]] +// CHECK: %[[TILEDARG1:.*]] = tensor.extract_slice %[[ITERARG3]]{{\[}}%[[IV0]], %[[IV3]], %[[IV2]], %[[IV1]]] +// CHECK: %[[RES:.*]] = linalg.generic +// CHECK-SAME: ins(%[[TILEDARG0]] +// CHECK-SAME: outs(%[[TILEDARG1]] +// CHECK: tensor.insert_slice %[[RES:.*]] diff --git a/mlir/test/python/dialects/shard.py b/mlir/test/python/dialects/shard.py new file mode 100644 index 0000000..c3ba605 --- /dev/null +++ b/mlir/test/python/dialects/shard.py @@ -0,0 +1,67 @@ +# RUN: %PYTHON %s | FileCheck %s + +from mlir.ir import * +from mlir.dialects import shard +from mlir.dialects import func + + +def constructAndPrintInModule(f): + print("\nTEST:", f.__name__) + with Context(), Location.unknown(): + module = Module.create() + with InsertionPoint(module.body): + f() + print(module) + module.operation.verify() + return f + + +# CHECK-LABEL: TEST: testShardGrid +@constructAndPrintInModule +def testShardGrid(): + # Test creating shard grids 
with different shapes + grid2d = shard.GridOp("grid_2d", [2, 2]) + grid1d = shard.GridOp("grid_1d", [4]) + + # CHECK: shard.grid @grid_2d(shape = 2x2) + # CHECK: shard.grid @grid_1d(shape = 4) + + +# CHECK-LABEL: TEST: testCollectiveOperations +@constructAndPrintInModule +def testCollectiveOperations(): + # Create grid and types + grid_op = shard.GridOp("grid_2x2", [2, 2]) + i32 = IntegerType.get_signless(32) + index_type = IndexType.get() + input_type = RankedTensorType.get([4, 2], i32) + gather_result_type = RankedTensorType.get([4, 4], i32) + + # Create a function to hold the operations + func_type = FunctionType.get([input_type], [input_type]) + test_func = func.FuncOp("test_collectives", func_type) + + with InsertionPoint(test_func.add_entry_block()): + arg = test_func.entry_block.arguments[0] + + gather_op = shard.AllGatherOp( + input=arg, + grid=FlatSymbolRefAttr.get("grid_2x2"), + grid_axes=DenseI16ArrayAttr.get([1]), + gather_axis=IntegerAttr.get(index_type, 1), + result=gather_result_type, + ) + + reduce_op = shard.AllReduceOp( + input=arg, + grid=FlatSymbolRefAttr.get("grid_2x2"), + reduction=shard.ReductionKind.Sum, + result=input_type, + ) + + func.ReturnOp([reduce_op]) + + # CHECK: shard.grid @grid_2x2(shape = 2x2) + # CHECK: func.func @test_collectives(%arg0: tensor<4x2xi32>) -> tensor<4x2xi32> + # CHECK: %all_gather = shard.all_gather %arg0 on @grid_2x2 grid_axes = [1] gather_axis = 1 : tensor<4x2xi32> -> tensor<4x4xi32> + # CHECK: %all_reduce = shard.all_reduce %arg0 on @grid_2x2 : tensor<4x2xi32> -> tensor<4x2xi32> diff --git a/offload/test/mapping/use_device_addr/target_data_use_device_addr_class_member_ref_with_map.cpp b/offload/test/mapping/use_device_addr/target_data_use_device_addr_class_member_ref_with_map.cpp index 5e8769e..50a28e0 100644 --- a/offload/test/mapping/use_device_addr/target_data_use_device_addr_class_member_ref_with_map.cpp +++ b/offload/test/mapping/use_device_addr/target_data_use_device_addr_class_member_ref_with_map.cpp @@ -16,7 +16,7 @@ struct ST { int m = 0; void f6() { - uintptr_t offset = (uintptr_t)&d - n; + ptrdiff_t offset = (char *)&d - ((char *)(uintptr_t)n); #pragma omp target data map(to : m, d) { void *mapped_ptr = omp_get_mapped_ptr(&d, omp_get_default_device()); @@ -34,11 +34,15 @@ struct ST { // ref/attach modifiers: // &ref_ptee(this[0].[d])), &ref_ptee(this[0].d), TO | FROM // &ref_ptr(this[0].d), &ref_ptee(this[0].d), 4, ATTACH - // EXPECTED: 1 0 - // CHECK: 0 1 - printf("%d %d\n", &d == mapped_ptr, - (uintptr_t)&d == (uintptr_t)mapped_ptr - offset); + // EXPECTED: 1 + // CHECK-NEXT: 0 + printf("%d\n", &d == mapped_ptr); + ptrdiff_t offset_device = (char *)mapped_ptr - (char *)&d; + printf("offset = %td (%p), offset_device = %td (%p)\n", offset, + (void *)offset, offset_device, (void *)offset_device); + printf("mapped_ptr = %p, device_addr = %p, ", mapped_ptr, &d); } + printf("host_addr = %p\n", &d); } } }; diff --git a/offload/test/ompt/callbacks.h b/offload/test/ompt/callbacks.h index 95437d9..2e7763f 100644 --- a/offload/test/ompt/callbacks.h +++ b/offload/test/ompt/callbacks.h @@ -5,6 +5,37 @@ // Tool related code below #include <omp-tools.h> +static const char *ompt_target_data_op_t_values[] = { + "", + "ompt_target_data_alloc", + "ompt_target_data_transfer_to_device", + "ompt_target_data_transfer_from_device", + "ompt_target_data_delete", + "ompt_target_data_associate", + "ompt_target_data_disassociate", + "ompt_target_data_alloc_async", + "ompt_target_data_transfer_to_device_async", + 
"ompt_target_data_transfer_from_device_async", + "ompt_target_data_delete_async"}; + +static const char *ompt_scope_endpoint_t_values[] = { + "", "ompt_scope_begin", "ompt_scope_end", "ompt_scope_beginend"}; + +static const char *ompt_target_t_values[] = {"", + "ompt_target", + "ompt_target_enter_data", + "ompt_target_exit_data", + "ompt_target_update", + "", + "", + "", + "", + "", + "ompt_target_nowait", + "ompt_target_enter_data_nowait", + "ompt_target_exit_data_nowait", + "ompt_target_update_nowait"}; + // For EMI callbacks ompt_id_t next_op_id = 0x8000000000000001; @@ -38,11 +69,11 @@ static void on_ompt_callback_target_data_op( void *src_addr, int src_device_num, void *dest_addr, int dest_device_num, size_t bytes, const void *codeptr_ra) { assert(codeptr_ra != 0 && "Unexpected null codeptr"); - printf(" Callback DataOp: target_id=%lu host_op_id=%lu optype=%d src=%p " + printf(" Callback DataOp: target_id=%lu host_op_id=%lu optype=%s src=%p " "src_device_num=%d " "dest=%p dest_device_num=%d bytes=%lu code=%p\n", - target_id, host_op_id, optype, src_addr, src_device_num, dest_addr, - dest_device_num, bytes, codeptr_ra); + target_id, host_op_id, ompt_target_data_op_t_values[optype], src_addr, + src_device_num, dest_addr, dest_device_num, bytes, codeptr_ra); } static void on_ompt_callback_target(ompt_target_t kind, @@ -51,9 +82,10 @@ static void on_ompt_callback_target(ompt_target_t kind, ompt_id_t target_id, const void *codeptr_ra) { assert(codeptr_ra != 0 && "Unexpected null codeptr"); - printf("Callback Target: target_id=%lu kind=%d endpoint=%d device_num=%d " + printf("Callback Target: target_id=%lu kind=%s endpoint=%s device_num=%d " "code=%p\n", - target_id, kind, endpoint, device_num, codeptr_ra); + target_id, ompt_target_t_values[kind], + ompt_scope_endpoint_t_values[endpoint], device_num, codeptr_ra); } static void on_ompt_callback_target_submit(ompt_id_t target_id, @@ -84,13 +116,15 @@ static void on_ompt_callback_target_data_op_emi( // target_task_data may be null, avoid dereferencing it uint64_t target_task_data_value = (target_task_data) ? 
target_task_data->value : 0; - printf(" Callback DataOp EMI: endpoint=%d optype=%d target_task_data=%p " + printf(" Callback DataOp EMI: endpoint=%s optype=%s target_task_data=%p " "(0x%lx) target_data=%p (0x%lx) host_op_id=%p (0x%lx) src=%p " "src_device_num=%d " "dest=%p dest_device_num=%d bytes=%lu code=%p\n", - endpoint, optype, target_task_data, target_task_data_value, - target_data, target_data->value, host_op_id, *host_op_id, src_addr, - src_device_num, dest_addr, dest_device_num, bytes, codeptr_ra); + ompt_scope_endpoint_t_values[endpoint], + ompt_target_data_op_t_values[optype], target_task_data, + target_task_data_value, target_data, target_data->value, host_op_id, + *host_op_id, src_addr, src_device_num, dest_addr, dest_device_num, + bytes, codeptr_ra); } static void on_ompt_callback_target_emi(ompt_target_t kind, @@ -102,20 +136,21 @@ static void on_ompt_callback_target_emi(ompt_target_t kind, assert(codeptr_ra != 0 && "Unexpected null codeptr"); if (endpoint == ompt_scope_begin) target_data->value = next_op_id++; - printf("Callback Target EMI: kind=%d endpoint=%d device_num=%d task_data=%p " + printf("Callback Target EMI: kind=%s endpoint=%s device_num=%d task_data=%p " "(0x%lx) target_task_data=%p (0x%lx) target_data=%p (0x%lx) code=%p\n", - kind, endpoint, device_num, task_data, task_data->value, - target_task_data, target_task_data->value, target_data, - target_data->value, codeptr_ra); + ompt_target_t_values[kind], ompt_scope_endpoint_t_values[endpoint], + device_num, task_data, task_data ? task_data->value : 0, + target_task_data, target_task_data ? target_task_data->value : 0, + target_data, target_data->value, codeptr_ra); } static void on_ompt_callback_target_submit_emi( ompt_scope_endpoint_t endpoint, ompt_data_t *target_data, ompt_id_t *host_op_id, unsigned int requested_num_teams) { - printf(" Callback Submit EMI: endpoint=%d req_num_teams=%d target_data=%p " + printf(" Callback Submit EMI: endpoint=%s req_num_teams=%d target_data=%p " "(0x%lx) host_op_id=%p (0x%lx)\n", - endpoint, requested_num_teams, target_data, target_data->value, - host_op_id, *host_op_id); + ompt_scope_endpoint_t_values[endpoint], requested_num_teams, + target_data, target_data->value, host_op_id, *host_op_id); } static void on_ompt_callback_target_map_emi(ompt_data_t *target_data, diff --git a/offload/test/ompt/omp_api.c b/offload/test/ompt/omp_api.c index a16ef7a..5fb2098 100644 --- a/offload/test/ompt/omp_api.c +++ b/offload/test/ompt/omp_api.c @@ -1,6 +1,8 @@ +// clang-format off // RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: ompt // REQUIRES: gpu +// clang-format on #include "omp.h" #include <stdlib.h> @@ -32,8 +34,8 @@ int main(int argc, char **argv) { // clang-format off /// CHECK: Callback Init: -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=5 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=6 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_associate +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_disassociate 
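The callbacks.h changes above replace raw enum integers in the callback printfs with symbolic names looked up from string tables, and the surrounding CHECK lines are updated to match (for example optype=ompt_target_data_alloc instead of optype=1), so the FileCheck expectations become self-describing. A small sketch of the name-table technique under the same assumption of densely numbered enum values; the enum and helper below are illustrative stand-ins, not the OMPT definitions:

```cpp
#include <cstdio>

// Illustrative stand-in for a densely numbered tool enum.
enum data_op_t { data_op_alloc = 1, data_op_to_device = 2, data_op_delete = 3 };

// Parallel table of names indexed by the enum value; slot 0 is unused.
static const char *DataOpNames[] = {"", "data_op_alloc", "data_op_to_device",
                                    "data_op_delete"};

// Bounds-checked lookup (added here for safety in the sketch) so a value
// outside the table prints a placeholder instead of reading past the array.
static const char *dataOpName(int Op) {
  constexpr int N = sizeof(DataOpNames) / sizeof(DataOpNames[0]);
  return (Op >= 0 && Op < N) ? DataOpNames[Op] : "unknown";
}

int main() {
  // Prints "optype=data_op_to_device" instead of "optype=2".
  std::printf("optype=%s\n", dataOpName(data_op_to_device));
  return 0;
}
```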
+/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete /// CHECK: Callback Fini: diff --git a/offload/test/ompt/target_memcpy.c b/offload/test/ompt/target_memcpy.c index f244e0f4..f769995 100644 --- a/offload/test/ompt/target_memcpy.c +++ b/offload/test/ompt/target_memcpy.c @@ -1,6 +1,8 @@ +// clang-format off // RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: ompt // REQUIRES: gpu +// clang-format on /* * Verify that for the target OpenMP APIs, the return address is non-null and @@ -46,26 +48,26 @@ int main() { } // clang-format off -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc /// CHECK-SAME: src_device_num=[[HOST:[0-9]+]] /// CHECK-SAME: dest_device_num=[[DEVICE:[0-9]+]] /// CHECK-NOT: code=(nil) /// CHECK: code=[[CODE1:0x[0-f]+]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_to_device /// CHECK-SAME: src_device_num=[[HOST]] {{.+}} dest_device_num=[[DEVICE]] /// CHECK-NOT: code=(nil) /// CHECK-NOT: code=[[CODE1]] /// CHECK: code=[[CODE2:0x[0-f]+]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device /// CHECK-SAME: src_device_num=[[DEVICE]] {{.+}} dest_device_num=[[DEVICE]] /// CHECK-NOT: code=(nil) /// CHECK-NOT: code=[[CODE2]] /// CHECK: code=[[CODE3:0x[0-f]+]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device /// CHECK-SAME: src_device_num=[[DEVICE]] {{.+}} dest_device_num=[[HOST]] /// CHECK-NOT: code=(nil) /// CHECK-NOT: code=[[CODE3]] /// CHECK: code=[[CODE4:0x[0-f]+]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete /// CHECK-NOT: code=(nil) /// CHECK-NOT: code=[[CODE4]] diff --git a/offload/test/ompt/target_memcpy_emi.c b/offload/test/ompt/target_memcpy_emi.c index 934caba..39f262a3 100644 --- a/offload/test/ompt/target_memcpy_emi.c +++ b/offload/test/ompt/target_memcpy_emi.c @@ -1,6 +1,8 @@ +// clang-format off // RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: ompt // REQUIRES: gpu +// clang-format on /* * Verify all three data transfer directions: H2D, D2D and D2H @@ -54,28 +56,28 @@ int main(void) { /// CHECK: Callback Init: /// CHECK: Allocating Memory on Device -/// CHECK: Callback DataOp EMI: endpoint=1 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_alloc /// CHECK-SAME: src_device_num=[[HOST:[0-9]+]] /// CHECK-SAME: dest_device_num=[[DEVICE:[0-9]+]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=1 {{.+}} src_device_num=[[HOST]] {{.+}} dest_device_num=[[DEVICE]] +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_alloc {{.+}} src_device_num=[[HOST]] {{.+}} dest_device_num=[[DEVICE]] /// CHECK: Testing: 
Host to Device -/// CHECK: Callback DataOp EMI: endpoint=1 optype=2 {{.+}} src_device_num=[[HOST]] {{.+}} dest_device_num=[[DEVICE]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=2 {{.+}} src_device_num=[[HOST]] {{.+}} dest_device_num=[[DEVICE]] +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_to_device {{.+}} src_device_num=[[HOST]] {{.+}} dest_device_num=[[DEVICE]] +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_to_device {{.+}} src_device_num=[[HOST]] {{.+}} dest_device_num=[[DEVICE]] /// CHECK: Testing: Device to Device -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 {{.+}} src_device_num=[[DEVICE]] {{.+}} dest_device_num=[[DEVICE]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 {{.+}} src_device_num=[[DEVICE]] {{.+}} dest_device_num=[[DEVICE]] +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_from_device {{.+}} src_device_num=[[DEVICE]] {{.+}} dest_device_num=[[DEVICE]] +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device {{.+}} src_device_num=[[DEVICE]] {{.+}} dest_device_num=[[DEVICE]] /// CHECK: Testing: Device to Host -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 {{.+}} src_device_num=[[DEVICE]] {{.+}} dest_device_num=[[HOST]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 {{.+}} src_device_num=[[DEVICE]] {{.+}} dest_device_num=[[HOST]] +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_from_device {{.+}} src_device_num=[[DEVICE]] {{.+}} dest_device_num=[[HOST]] +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device {{.+}} src_device_num=[[DEVICE]] {{.+}} dest_device_num=[[HOST]] /// CHECK: Checking Correctness /// CHECK: Freeing Memory on Device -/// CHECK: Callback DataOp EMI: endpoint=1 optype=4 {{.+}} src_device_num=[[DEVICE]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=4 {{.+}} src_device_num=[[DEVICE]] +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_delete {{.+}} src_device_num=[[DEVICE]] +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_delete {{.+}} src_device_num=[[DEVICE]] /// CHECK: Callback Fini: diff --git a/offload/test/ompt/veccopy.c b/offload/test/ompt/veccopy.c index f28d94f..24d7363 100644 --- a/offload/test/ompt/veccopy.c +++ b/offload/test/ompt/veccopy.c @@ -1,6 +1,8 @@ +// clang-format off // RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: ompt // REQUIRES: gpu +// clang-format on /* * Example OpenMP program that registers non-EMI callbacks @@ -54,48 +56,47 @@ int main() { // clang-format off /// CHECK: Callback Init: /// CHECK: Callback Load: -/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=1 device_num=[[DEVICE_NUM:[0-9]+]] +/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=ompt_target endpoint=ompt_scope_begin device_num=[[DEVICE_NUM:[0-9]+]] /// CHECK-NOT: code=(nil) /// CHECK: code=[[CODE1:.*]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] 
optype=ompt_target_data_transfer_to_device /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_to_device /// CHECK: code=[[CODE1]] /// CHECK: Callback Submit: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] req_num_teams=1 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete /// CHECK: code=[[CODE1]] -/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=2 device_num=[[DEVICE_NUM]] code=[[CODE1]] +/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=ompt_target endpoint=ompt_scope_end device_num=[[DEVICE_NUM]] code=[[CODE1]] -/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=1 -/// device_num=[[DEVICE_NUM]] +/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=ompt_target endpoint=ompt_scope_begin device_num=[[DEVICE_NUM]] /// CHECK-NOT: code=(nil) /// CHECK: code=[[CODE2:.*]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_to_device /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_to_device /// CHECK: code=[[CODE2]] /// CHECK: Callback Submit: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] req_num_teams=0 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 +/// CHECK: Callback DataOp: 
target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete /// CHECK: code=[[CODE2]] -/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=2 device_num=[[DEVICE_NUM]] code=[[CODE2]] +/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=ompt_target endpoint=ompt_scope_end device_num=[[DEVICE_NUM]] code=[[CODE2]] /// CHECK: Callback Fini: diff --git a/offload/test/ompt/veccopy_data.c b/offload/test/ompt/veccopy_data.c index 059ca97..9df5374 100644 --- a/offload/test/ompt/veccopy_data.c +++ b/offload/test/ompt/veccopy_data.c @@ -1,6 +1,8 @@ +// clang-format off // RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: ompt // REQUIRES: gpu +// clang-format on /* * Example OpenMP program that registers EMI callbacks. @@ -73,85 +75,86 @@ int main() { return rc; } +// clang-format off /// CHECK-NOT: Callback Target EMI: /// CHECK-NOT: device_num=-1 /// CHECK: Callback Init: /// CHECK: Callback Load: -/// CHECK: Callback Target EMI: kind=2 endpoint=1 +/// CHECK: Callback Target EMI: kind=ompt_target_enter_data endpoint=ompt_scope_begin /// CHECK-NOT: device_num=-1 /// CHECK-NOT: code=(nil) /// CHECK: code=[[CODE1:.*]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_alloc /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_alloc /// CHECK-NOT: dest=(nil) /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=2 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_to_device /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=2 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_to_device /// CHECK: code=[[CODE1]] -/// CHECK: Callback Target EMI: kind=2 endpoint=2 +/// CHECK: Callback Target EMI: kind=ompt_target_enter_data endpoint=ompt_scope_end /// CHECK-NOT: device_num=-1 /// CHECK: code=[[CODE1]] -/// CHECK: Callback Target EMI: kind=1 endpoint=1 +/// CHECK: Callback Target EMI: kind=ompt_target endpoint=ompt_scope_begin /// CHECK-NOT: device_num=-1 /// CHECK-NOT: code=(nil) /// CHECK: code=[[CODE2:.*]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_alloc /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_alloc /// CHECK-NOT: dest=(nil) /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=2 +/// CHECK: Callback DataOp 
EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_to_device /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=2 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_to_device /// CHECK: code=[[CODE2]] -/// CHECK: Callback Submit EMI: endpoint=1 req_num_teams=1 -/// CHECK: Callback Submit EMI: endpoint=2 req_num_teams=1 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 +/// CHECK: Callback Submit EMI: endpoint=ompt_scope_begin req_num_teams=1 +/// CHECK: Callback Submit EMI: endpoint=ompt_scope_end req_num_teams=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=4 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_delete /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=4 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_delete /// CHECK: code=[[CODE2]] -/// CHECK: Callback Target EMI: kind=1 endpoint=2 +/// CHECK: Callback Target EMI: kind=ompt_target endpoint=ompt_scope_end /// CHECK-NOT: device_num=-1 /// CHECK: code=[[CODE2]] -/// CHECK: Callback Target EMI: kind=3 endpoint=1 +/// CHECK: Callback Target EMI: kind=ompt_target_exit_data endpoint=ompt_scope_begin /// CHECK-NOT: device_num=-1 /// CHECK-NOT: code=(nil) /// CHECK: code=[[CODE3:.*]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE3]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE3]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=4 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_delete /// CHECK: code=[[CODE3]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=4 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_delete /// CHECK: code=[[CODE3]] -/// CHECK: Callback Target EMI: kind=3 endpoint=2 +/// CHECK: Callback Target EMI: kind=ompt_target_exit_data endpoint=ompt_scope_end /// CHECK-NOT: device_num=-1 /// CHECK: code=[[CODE3]] -/// CHECK: Callback Target EMI: kind=1 endpoint=1 +/// CHECK: Callback Target EMI: kind=ompt_target endpoint=ompt_scope_begin /// CHECK-NOT: device_num=-1 /// CHECK-NOT: code=(nil) /// CHECK: code=[[CODE4:.*]] -/// CHECK: Callback Submit EMI: endpoint=1 req_num_teams=1 -/// CHECK: Callback Submit EMI: endpoint=2 req_num_teams=1 -/// CHECK: Callback Target EMI: kind=1 endpoint=2 +/// CHECK: Callback Submit EMI: endpoint=ompt_scope_begin req_num_teams=1 +/// CHECK: Callback Submit EMI: endpoint=ompt_scope_end req_num_teams=1 +/// CHECK: Callback Target EMI: kind=ompt_target endpoint=ompt_scope_end /// CHECK-NOT: device_num=-1 /// CHECK: code=[[CODE4]] -/// CHECK: Callback Target EMI: kind=4 endpoint=1 +/// CHECK: Callback Target EMI: kind=ompt_target_update endpoint=ompt_scope_begin /// CHECK-NOT: device_num=-1 /// CHECK-NOT: code=(nil) /// CHECK: code=[[CODE5:.*]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin 
optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE5]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE5]] -/// CHECK: Callback Target EMI: kind=4 endpoint=2 +/// CHECK: Callback Target EMI: kind=ompt_target_update endpoint=ompt_scope_end /// CHECK-NOT: device_num=-1 /// CHECK: code=[[CODE5]] /// CHECK: Callback Fini: diff --git a/offload/test/ompt/veccopy_disallow_both.c b/offload/test/ompt/veccopy_disallow_both.c index b531a62..bfc67c5 100644 --- a/offload/test/ompt/veccopy_disallow_both.c +++ b/offload/test/ompt/veccopy_disallow_both.c @@ -1,6 +1,8 @@ +// clang-format off // RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: ompt // REQUIRES: gpu +// clang-format on /* * Example OpenMP program that shows that both EMI and non-EMI @@ -54,48 +56,49 @@ int main() { return rc; } +// clang-format off /// CHECK: Callback Init: /// CHECK: Callback Load: -/// CHECK: Callback Target EMI: kind=1 endpoint=1 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=1 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=1 +/// CHECK: Callback Target EMI: kind=ompt_target endpoint=ompt_scope_begin +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_alloc +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_alloc /// CHECK-NOT: dest=(nil) -/// CHECK: Callback DataOp EMI: endpoint=1 optype=2 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=2 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=1 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_to_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_to_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_alloc +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_alloc /// CHECK-NOT: dest=(nil) -/// CHECK: Callback DataOp EMI: endpoint=1 optype=2 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=2 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_to_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_to_device /// CHECK: Callback Submit: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] req_num_teams=1 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=4 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=4 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=4 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=4 -/// CHECK: Callback Target EMI: kind=1 endpoint=2 -/// CHECK: Callback Target EMI: kind=1 endpoint=1 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=1 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device +/// CHECK: Callback 
DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_delete +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_delete +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_delete +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_delete +/// CHECK: Callback Target EMI: kind=ompt_target endpoint=ompt_scope_end +/// CHECK: Callback Target EMI: kind=ompt_target endpoint=ompt_scope_begin +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_alloc +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_alloc /// CHECK-NOT: dest=(nil) -/// CHECK: Callback DataOp EMI: endpoint=1 optype=2 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=2 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=1 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_to_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_to_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_alloc +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_alloc /// CHECK-NOT: dest=(nil) -/// CHECK: Callback DataOp EMI: endpoint=1 optype=2 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=2 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_to_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_to_device /// CHECK: Callback Submit: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] req_num_teams=0 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=4 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=4 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=4 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=4 -/// CHECK: Callback Target EMI: kind=1 endpoint=2 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_delete +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_delete +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_delete +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_delete +/// CHECK: Callback Target EMI: kind=ompt_target endpoint=ompt_scope_end /// CHECK: Callback Fini: diff --git a/offload/test/ompt/veccopy_emi.c b/offload/test/ompt/veccopy_emi.c index 2c57a85..a1427b8 100644 --- a/offload/test/ompt/veccopy_emi.c +++ b/offload/test/ompt/veccopy_emi.c @@ -1,6 +1,8 @@ +// clang-format off // RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: ompt // REQUIRES: gpu +// clang-format on /* * Example OpenMP program that registers EMI callbacks @@ -52,89 +54,90 @@ int main() { return rc; } +// clang-format off /// CHECK: Callback Init: /// CHECK: 
Callback Load: -/// CHECK: Callback Target EMI: kind=1 endpoint=1 +/// CHECK: Callback Target EMI: kind=ompt_target endpoint=ompt_scope_begin /// CHECK-NOT: code=(nil) /// CHECK: code=[[CODE1:.*]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_alloc /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_alloc /// CHECK-NOT: dest=(nil) /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=2 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_to_device /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=2 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_to_device /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_alloc /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_alloc /// CHECK-NOT: dest=(nil) /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=2 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_to_device /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=2 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_to_device /// CHECK: code=[[CODE1]] -/// CHECK: Callback Submit EMI: endpoint=1 req_num_teams=1 -/// CHECK: Callback Submit EMI: endpoint=2 req_num_teams=1 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 +/// CHECK: Callback Submit EMI: endpoint=ompt_scope_begin req_num_teams=1 +/// CHECK: Callback Submit EMI: endpoint=ompt_scope_end req_num_teams=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=4 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_delete /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=4 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_delete /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=4 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_delete /// CHECK: code=[[CODE1]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=4 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_delete /// CHECK: code=[[CODE1]] -/// CHECK: Callback Target EMI: kind=1 endpoint=2 +/// CHECK: Callback Target EMI: kind=ompt_target endpoint=ompt_scope_end /// CHECK: code=[[CODE1]] -/// CHECK: Callback Target EMI: kind=1 endpoint=1 +/// CHECK: Callback Target EMI: 
kind=ompt_target endpoint=ompt_scope_begin /// CHECK-NOT: code=(nil) /// CHECK: code=[[CODE2:.*]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_alloc /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_alloc /// CHECK-NOT: dest=(nil) /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=2 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_to_device /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=2 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_to_device /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_alloc /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_alloc /// CHECK-NOT: dest=(nil) /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=2 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_to_device /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=2 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_to_device /// CHECK: code=[[CODE2]] -/// CHECK: Callback Submit EMI: endpoint=1 req_num_teams=0 -/// CHECK: Callback Submit EMI: endpoint=2 req_num_teams=0 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 +/// CHECK: Callback Submit EMI: endpoint=ompt_scope_begin req_num_teams=0 +/// CHECK: Callback Submit EMI: endpoint=ompt_scope_end req_num_teams=0 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=4 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_delete /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=4 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_delete /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=1 optype=4 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_delete /// CHECK: code=[[CODE2]] -/// CHECK: Callback DataOp EMI: endpoint=2 optype=4 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_delete /// CHECK: code=[[CODE2]] -/// CHECK: Callback Target EMI: kind=1 endpoint=2 +/// CHECK: Callback Target EMI: kind=ompt_target endpoint=ompt_scope_end /// CHECK: code=[[CODE2]] /// CHECK: Callback Fini: diff --git a/offload/test/ompt/veccopy_emi_map.c b/offload/test/ompt/veccopy_emi_map.c index fa18a43..450faa1 100644 --- a/offload/test/ompt/veccopy_emi_map.c +++ 
b/offload/test/ompt/veccopy_emi_map.c @@ -1,6 +1,8 @@ +// clang-format off // RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: ompt // REQUIRES: gpu +// clang-format on /* * Example OpenMP program that shows that map-EMI callbacks are not supported. @@ -52,51 +54,52 @@ int main() { return rc; } +// clang-format off /// CHECK: 0: Could not register callback 'ompt_callback_target_map_emi' /// CHECK: Callback Init: /// CHECK: Callback Load: -/// CHECK: Callback Target EMI: kind=1 endpoint=1 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=1 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=1 +/// CHECK: Callback Target EMI: kind=ompt_target endpoint=ompt_scope_begin +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_alloc +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_alloc /// CHECK-NOT: dest=(nil) -/// CHECK: Callback DataOp EMI: endpoint=1 optype=2 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=2 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=1 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_to_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_to_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_alloc +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_alloc /// CHECK-NOT: dest=(nil) -/// CHECK: Callback DataOp EMI: endpoint=1 optype=2 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=2 -/// CHECK: Callback Submit EMI: endpoint=1 req_num_teams=1 -/// CHECK: Callback Submit EMI: endpoint=2 req_num_teams=1 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=4 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=4 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=4 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=4 -/// CHECK: Callback Target EMI: kind=1 endpoint=2 -/// CHECK: Callback Target EMI: kind=1 endpoint=1 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=1 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_to_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_to_device +/// CHECK: Callback Submit EMI: endpoint=ompt_scope_begin req_num_teams=1 +/// CHECK: Callback Submit EMI: endpoint=ompt_scope_end req_num_teams=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_delete +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_delete +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_delete +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_delete +/// CHECK: Callback Target EMI: kind=ompt_target 
endpoint=ompt_scope_end +/// CHECK: Callback Target EMI: kind=ompt_target endpoint=ompt_scope_begin +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_alloc +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_alloc /// CHECK-NOT: dest=(nil) -/// CHECK: Callback DataOp EMI: endpoint=1 optype=2 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=2 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=1 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=1 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_to_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_to_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_alloc +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_alloc /// CHECK-NOT: dest=(nil) -/// CHECK: Callback DataOp EMI: endpoint=1 optype=2 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=2 -/// CHECK: Callback Submit EMI: endpoint=1 req_num_teams=0 -/// CHECK: Callback Submit EMI: endpoint=2 req_num_teams=0 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=3 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=3 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=4 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=4 -/// CHECK: Callback DataOp EMI: endpoint=1 optype=4 -/// CHECK: Callback DataOp EMI: endpoint=2 optype=4 -/// CHECK: Callback Target EMI: kind=1 endpoint=2 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_to_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_to_device +/// CHECK: Callback Submit EMI: endpoint=ompt_scope_begin req_num_teams=0 +/// CHECK: Callback Submit EMI: endpoint=ompt_scope_end req_num_teams=0 +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_delete +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_delete +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_begin optype=ompt_target_data_delete +/// CHECK: Callback DataOp EMI: endpoint=ompt_scope_end optype=ompt_target_data_delete +/// CHECK: Callback Target EMI: kind=ompt_target endpoint=ompt_scope_end /// CHECK: Callback Fini: diff --git a/offload/test/ompt/veccopy_map.c b/offload/test/ompt/veccopy_map.c index 2e817d3..12e141e 100644 --- a/offload/test/ompt/veccopy_map.c +++ b/offload/test/ompt/veccopy_map.c @@ -1,6 +1,8 @@ +// clang-format off // RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: ompt // REQUIRES: gpu +// clang-format on /* * Example OpenMP program that shows that map callbacks are not supported. 
@@ -51,31 +53,31 @@ int main() { return rc; } - +// clang-format off /// CHECK: 0: Could not register callback 'ompt_callback_target_map' /// CHECK: Callback Init: /// CHECK: Callback Load: -/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=1 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 +/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=ompt_target endpoint=ompt_scope_begin +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_to_device +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_to_device /// CHECK: Callback Submit: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] req_num_teams=1 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 -/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=2 - -/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=1 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete +/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=ompt_target endpoint=ompt_scope_end + +/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=ompt_target endpoint=ompt_scope_begin +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_to_device +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] 
host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_to_device /// CHECK: Callback Submit: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] req_num_teams=0 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 -/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=2 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete +/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=ompt_target endpoint=ompt_scope_end /// CHECK: Callback Fini: diff --git a/offload/test/ompt/veccopy_no_device_init.c b/offload/test/ompt/veccopy_no_device_init.c index 8ee8243..ade06fc 100644 --- a/offload/test/ompt/veccopy_no_device_init.c +++ b/offload/test/ompt/veccopy_no_device_init.c @@ -1,6 +1,7 @@ // clang-format off // RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: ompt +// clang-format on /* * Example OpenMP program that shows that if no device init callback @@ -51,30 +52,31 @@ int main() { return rc; } + // clang-format off /// CHECK-NOT: Callback Init: /// CHECK: Callback Load: -/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=1 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 +/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=ompt_target endpoint=ompt_scope_begin +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_to_device +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_to_device /// CHECK: Callback Submit: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] req_num_teams=1 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 -/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] 
kind=1 endpoint=2 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete +/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=ompt_target endpoint=ompt_scope_end -/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=1 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 +/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=ompt_target endpoint=ompt_scope_begin +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_to_device +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_to_device /// CHECK: Callback Submit: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] req_num_teams=0 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 -/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 -/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=2 +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete +/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete +/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=ompt_target endpoint=ompt_scope_end /// CHECK-NOT: Callback Fini: diff --git a/offload/test/ompt/veccopy_wrong_return.c b/offload/test/ompt/veccopy_wrong_return.c index 2d07b4e..17327f3 100644 --- a/offload/test/ompt/veccopy_wrong_return.c +++ b/offload/test/ompt/veccopy_wrong_return.c @@ -1,5 +1,7 @@ +// clang-format off // RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: ompt +// clang-format on /* * Example OpenMP program that shows that if the initialize function @@ -51,29 +53,30 @@ int main() { return rc; } +// clang-format off /// CHECK-NOT: Callback Init: /// CHECK-NOT: Callback Load: -/// 
CHECK-NOT: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=1 -/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 -/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 -/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 -/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 +/// CHECK-NOT: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=ompt_target endpoint=ompt_scope_begin +/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc +/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_to_device +/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc +/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_to_device /// CHECK-NOT: Callback Submit: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] req_num_teams=1 -/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 -/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 -/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 -/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 -/// CHECK-NOT: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=2 +/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device +/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device +/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete +/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete +/// CHECK-NOT: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=ompt_target endpoint=ompt_scope_end -/// CHECK-NOT: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=1 -/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 -/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 -/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1 -/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2 +/// CHECK-NOT: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=ompt_target endpoint=ompt_scope_begin +/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc +/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_to_device +/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_alloc +/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_to_device 
/// CHECK-NOT: Callback Submit: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] req_num_teams=0 -/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 -/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3 -/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 -/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4 -/// CHECK-NOT: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=2 +/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device +/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_transfer_from_device +/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete +/// CHECK-NOT: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=ompt_target_data_delete +/// CHECK-NOT: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=ompt_target endpoint=ompt_scope_end /// CHECK-NOT: Callback Fini diff --git a/polly/docs/ReleaseNotes.rst b/polly/docs/ReleaseNotes.rst index f7c9689..f5ea47b 100644 --- a/polly/docs/ReleaseNotes.rst +++ b/polly/docs/ReleaseNotes.rst @@ -11,3 +11,5 @@ In Polly |version| the following important changes have been incorporated. the new features that have recently been committed to our development branch. + * ScopInliner has been updated for the New Pass Manager. + diff --git a/polly/include/polly/LinkAllPasses.h b/polly/include/polly/LinkAllPasses.h index c3b68a7..9978344c 100644 --- a/polly/include/polly/LinkAllPasses.h +++ b/polly/include/polly/LinkAllPasses.h @@ -119,7 +119,7 @@ struct PollyForcePassLinking { namespace llvm { void initializeCodePreparationPass(llvm::PassRegistry &); -void initializeScopInlinerPass(llvm::PassRegistry &); +void initializeScopInlinerWrapperPassPass(llvm::PassRegistry &); void initializeScopDetectionWrapperPassPass(llvm::PassRegistry &); void initializeScopDetectionPrinterLegacyPassPass(llvm::PassRegistry &); void initializeScopInfoRegionPassPass(PassRegistry &); diff --git a/polly/include/polly/ScopInliner.h b/polly/include/polly/ScopInliner.h new file mode 100644 index 0000000..0146678 --- /dev/null +++ b/polly/include/polly/ScopInliner.h @@ -0,0 +1,34 @@ +//===------ ScopInliner.h ------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef POLLY_POLLYINLINER_H +#define POLLY_POLLYINLINER_H + +#include "llvm/Analysis/CGSCCPassManager.h" +#include "llvm/Analysis/LazyCallGraph.h" +#include "llvm/IR/PassManager.h" + +namespace polly { +class ScopInlinerPass : public llvm::PassInfoMixin<ScopInlinerPass> { +public: + ScopInlinerPass(); + + llvm::PreservedAnalyses run(llvm::LazyCallGraph::SCC &C, + llvm::CGSCCAnalysisManager &AM, + llvm::LazyCallGraph &CG, + llvm::CGSCCUpdateResult &UR); +}; + +llvm::Pass *createScopInlinerWrapperPass(); +} // namespace polly + +namespace llvm { +void initializeScopInlinerWrapperPassPass(llvm::PassRegistry &); +} + +#endif /* POLLY_POLLYINLINER_H */ diff --git a/polly/lib/Support/PollyPasses.def b/polly/lib/Support/PollyPasses.def index e068f31..2c792a5 100644 --- a/polly/lib/Support/PollyPasses.def +++ b/polly/lib/Support/PollyPasses.def @@ -1,3 +1,9 @@ +#ifndef CGSCC_PASS +#define CGSCC_PASS(NAME, CREATE_PASS, PARSER) +#endif +CGSCC_PASS("polly-inline", ScopInlinerPass(), parseNoOptions) +#undef CGSCC_PASS + #ifndef FUNCTION_ANALYSIS #define FUNCTION_ANALYSIS(NAME, CREATE_PASS) #endif diff --git a/polly/lib/Support/RegisterPasses.cpp b/polly/lib/Support/RegisterPasses.cpp index 0420dff..04f8715 100644 --- a/polly/lib/Support/RegisterPasses.cpp +++ b/polly/lib/Support/RegisterPasses.cpp @@ -35,6 +35,7 @@ #include "polly/ScopDetection.h" #include "polly/ScopGraphPrinter.h" #include "polly/ScopInfo.h" +#include "polly/ScopInliner.h" #include "polly/Simplify.h" #include "polly/Support/DumpFunctionPass.h" #include "polly/Support/DumpModulePass.h" @@ -46,10 +47,13 @@ #include "llvm/Passes/PassBuilder.h" #include "llvm/Passes/PassPlugin.h" #include "llvm/Support/CommandLine.h" +#include "llvm/Support/Error.h" #include "llvm/Support/TargetSelect.h" #include "llvm/Transforms/IPO.h" +using namespace llvm; namespace cl = llvm::cl; +using namespace polly; using llvm::FunctionPassManager; using llvm::OptimizationLevel; @@ -233,7 +237,7 @@ void initializePollyPasses(llvm::PassRegistry &Registry) { initializePollyCanonicalizePass(Registry); initializeScopDetectionWrapperPassPass(Registry); initializeScopDetectionPrinterLegacyPassPass(Registry); - initializeScopInlinerPass(Registry); + initializeScopInlinerWrapperPassPass(Registry); initializeScopInfoRegionPassPass(Registry); initializeScopInfoPrinterLegacyRegionPassPass(Registry); initializeScopInfoWrapperPassPass(Registry); @@ -434,6 +438,16 @@ static void buildLatePollyPipeline(FunctionPassManager &PM, false); } +static llvm::Expected<std::monostate> parseNoOptions(StringRef Params) { + if (!Params.empty()) + return make_error<StringError>( + formatv("'{0}' passed to pass that does not take any options", Params) + .str(), + inconvertibleErrorCode()); + + return std::monostate{}; +} + static OwningScopAnalysisManagerFunctionProxy createScopAnalyses(FunctionAnalysisManager &FAM, PassInstrumentationCallbacks *PIC) { @@ -461,6 +475,23 @@ static void registerFunctionAnalyses(FunctionAnalysisManager &FAM, FAM.registerPass([&FAM, PIC] { return createScopAnalyses(FAM, PIC); }); } +static llvm::Expected<bool> +parseCGPipeline(StringRef Name, llvm::CGSCCPassManager &CGPM, + PassInstrumentationCallbacks *PIC, + ArrayRef<PassBuilder::PipelineElement> Pipeline) { +#define CGSCC_PASS(NAME, CREATE_PASS, PARSER) \ + if (PassBuilder::checkParametrizedPassName(Name, NAME)) { \ + auto Params = 
PassBuilder::parsePassParameters(PARSER, Name, NAME); \ + if (!Params) \ + return Params.takeError(); \ + CGPM.addPass(CREATE_PASS); \ + return true; \ + } +#include "PollyPasses.def" + + return false; +} + static bool parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM, ArrayRef<PassBuilder::PipelineElement> Pipeline) { @@ -598,6 +629,12 @@ void registerPollyPasses(PassBuilder &PB) { ArrayRef<PassBuilder::PipelineElement> Pipeline) -> bool { return parseScopPipeline(Name, FPM, PIC, Pipeline); }); + PB.registerPipelineParsingCallback( + [PIC](StringRef Name, CGSCCPassManager &CGPM, + ArrayRef<PassBuilder::PipelineElement> Pipeline) -> bool { + ExitOnError Err("Unable to parse Polly call graph pass: "); + return Err(parseCGPipeline(Name, CGPM, PIC, Pipeline)); + }); PB.registerParseTopLevelPipelineCallback( + [PIC](llvm::ModulePassManager &MPM, ArrayRef<PassBuilder::PipelineElement> Pipeline) -> bool { diff --git a/polly/lib/Transform/ScopInliner.cpp b/polly/lib/Transform/ScopInliner.cpp index b78206c..c04ba34 100644 --- a/polly/lib/Transform/ScopInliner.cpp +++ b/polly/lib/Transform/ScopInliner.cpp @@ -13,10 +13,14 @@ // //===----------------------------------------------------------------------===// -#include "polly/LinkAllPasses.h" +#include "polly/ScopInliner.h" #include "polly/ScopDetection.h" +#include "polly/ScopInliner.h" #include "llvm/Analysis/CallGraph.h" #include "llvm/Analysis/CallGraphSCCPass.h" +#include "llvm/Analysis/OptimizationRemarkEmitter.h" +#include "llvm/Analysis/RegionInfo.h" +#include "llvm/IR/Dominators.h" #include "llvm/IR/PassManager.h" #include "llvm/Passes/PassBuilder.h" #include "llvm/Transforms/IPO/AlwaysInliner.h" @@ -28,13 +32,77 @@ using namespace llvm; using namespace polly; namespace { -class ScopInliner final : public CallGraphSCCPass { + +/// Inliner implementation that works with both LPM (using SCC_t=CallGraph) and +/// NPM (using SCC_t=LazyCallGraph::SCC) +template <typename SCC_t> bool runScopInlinerImpl(Function *F, SCC_t &SCC) { + // We do not try to inline non-trivial SCCs because this would lead to + // "infinite" inlining if we are not careful. + if (SCC.size() > 1) + return false; + assert(SCC.size() == 1 && "found empty SCC"); + + // If the function is a nullptr, or the function is a declaration. + if (!F) + return false; + if (F->isDeclaration()) { + POLLY_DEBUG(dbgs() << "Skipping " << F->getName() + << "because it is a declaration.\n"); + return false; + } + + PassBuilder PB; + // Populate analysis managers and register Polly-specific analyses.
+ LoopAnalysisManager LAM; + FunctionAnalysisManager FAM; + CGSCCAnalysisManager CGAM; + ModuleAnalysisManager MAM; + PB.registerModuleAnalyses(MAM); + PB.registerCGSCCAnalyses(CGAM); + PB.registerFunctionAnalyses(FAM); + PB.registerLoopAnalyses(LAM); + PB.crossRegisterProxies(LAM, FAM, CGAM, MAM); + + auto &DT = FAM.getResult<DominatorTreeAnalysis>(*F); + auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(*F); + auto &LI = FAM.getResult<LoopAnalysis>(*F); + auto &RI = FAM.getResult<RegionInfoAnalysis>(*F); + auto &AA = FAM.getResult<AAManager>(*F); + auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F); + ScopDetection SD(DT, SE, LI, RI, AA, ORE); + SD.detect(*F); + + const bool HasScopAsTopLevelRegion = + SD.ValidRegions.contains(RI.getTopLevelRegion()); + + bool Changed = false; + if (HasScopAsTopLevelRegion) { + POLLY_DEBUG(dbgs() << "Skipping " << F->getName() + << " has scop as top level region"); + F->addFnAttr(llvm::Attribute::AlwaysInline); + + ModulePassManager MPM; + MPM.addPass(AlwaysInlinerPass()); + Module *M = F->getParent(); + assert(M && "Function has illegal module"); + PreservedAnalyses PA = MPM.run(*M, MAM); + if (!PA.areAllPreserved()) + Changed = true; + } else { + POLLY_DEBUG(dbgs() << F->getName() + << " does NOT have scop as top level region\n"); + } + + return Changed; +} + +class ScopInlinerWrapperPass final : public CallGraphSCCPass { using llvm::Pass::doInitialization; public: static char ID; - ScopInliner() : CallGraphSCCPass(ID) {} + ScopInlinerWrapperPass() : CallGraphSCCPass(ID) {} bool doInitialization(CallGraph &CG) override { if (!polly::PollyAllowFullFunction) { @@ -50,60 +118,8 @@ public: } bool runOnSCC(CallGraphSCC &SCC) override { - // We do not try to inline non-trivial SCCs because this would lead to - // "infinite" inlining if we are not careful. - if (SCC.size() > 1) - return false; - assert(SCC.size() == 1 && "found empty SCC"); Function *F = (*SCC.begin())->getFunction(); - - // If the function is a nullptr, or the function is a declaration. - if (!F) - return false; - if (F->isDeclaration()) { - POLLY_DEBUG(dbgs() << "Skipping " << F->getName() - << "because it is a declaration.\n"); - return false; - } - - PassBuilder PB; - // Populate analysis managers and register Polly-specific analyses. 
- LoopAnalysisManager LAM; - FunctionAnalysisManager FAM; - CGSCCAnalysisManager CGAM; - ModuleAnalysisManager MAM; - FAM.registerPass([] { return ScopAnalysis(); }); - PB.registerModuleAnalyses(MAM); - PB.registerCGSCCAnalyses(CGAM); - PB.registerFunctionAnalyses(FAM); - PB.registerLoopAnalyses(LAM); - PB.crossRegisterProxies(LAM, FAM, CGAM, MAM); - - RegionInfo &RI = FAM.getResult<RegionInfoAnalysis>(*F); - ScopDetection &SD = FAM.getResult<ScopAnalysis>(*F); - - const bool HasScopAsTopLevelRegion = - SD.ValidRegions.contains(RI.getTopLevelRegion()); - - bool Changed = false; - if (HasScopAsTopLevelRegion) { - POLLY_DEBUG(dbgs() << "Skipping " << F->getName() - << " has scop as top level region"); - F->addFnAttr(llvm::Attribute::AlwaysInline); - - ModulePassManager MPM; - MPM.addPass(AlwaysInlinerPass()); - Module *M = F->getParent(); - assert(M && "Function has illegal module"); - PreservedAnalyses PA = MPM.run(*M, MAM); - if (!PA.areAllPreserved()) - Changed = true; - } else { - POLLY_DEBUG(dbgs() << F->getName() - << " does NOT have scop as top level region\n"); - } - - return Changed; + return runScopInlinerImpl(F, SCC); }; void getAnalysisUsage(AnalysisUsage &AU) const override { @@ -111,18 +127,39 @@ public: } }; } // namespace -char ScopInliner::ID; +char ScopInlinerWrapperPass::ID; -Pass *polly::createScopInlinerPass() { - ScopInliner *pass = new ScopInliner(); +Pass *polly::createScopInlinerWrapperPass() { + ScopInlinerWrapperPass *pass = new ScopInlinerWrapperPass(); return pass; } INITIALIZE_PASS_BEGIN( - ScopInliner, "polly-scop-inliner", + ScopInlinerWrapperPass, "polly-scop-inliner", "inline functions based on how much of the function is a scop.", false, false) INITIALIZE_PASS_END( - ScopInliner, "polly-scop-inliner", + ScopInlinerWrapperPass, "polly-scop-inliner", "inline functions based on how much of the function is a scop.", false, false) + +polly::ScopInlinerPass::ScopInlinerPass() { + if (!polly::PollyAllowFullFunction) { + report_fatal_error( + "Aborting from ScopInliner because it only makes sense to run with " + "-polly-allow-full-function. " + "The heuristic for ScopInliner checks that the full function is a " + "Scop, which happens if and only if polly-allow-full-function is " + "enabled. " + "If not, the entry block is not included in the Scop."); + } +} + +PreservedAnalyses polly::ScopInlinerPass::run(llvm::LazyCallGraph::SCC &SCC, + llvm::CGSCCAnalysisManager &AM, + llvm::LazyCallGraph &CG, + llvm::CGSCCUpdateResult &UR) { + Function *F = &SCC.begin()->getFunction(); + bool Changed = runScopInlinerImpl(F, SCC); + return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all(); +} diff --git a/polly/test/ScopInliner/ignore-declares.ll b/polly/test/ScopInliner/ignore-declares.ll index 11722dc..5c0cfa1 100644 --- a/polly/test/ScopInliner/ignore-declares.ll +++ b/polly/test/ScopInliner/ignore-declares.ll @@ -1,5 +1,4 @@ -; RUN: opt %loadPolly -polly-detect-full-functions -polly-scop-inliner \ -; RUN: -polly-scops -disable-output < %s +; RUN: opt %loadNPMPolly -polly-detect-full-functions '-passes=cgscc(polly-inline),function(print<polly-function-scops>)' -disable-output < %s ; Check that we do not crash if there are declares. We should skip function ; declarations and not try to query for domtree.
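The RUN lines above and below drive the ported inliner through the new pass manager as cgscc(polly-inline), which resolves via the CGSCC_PASS entry added to PollyPasses.def and the CGSCCPassManager pipeline-parsing callback registered in RegisterPasses.cpp. As a minimal, self-contained sketch of that registration pattern, the following shows what the same mechanism looks like in a hypothetical out-of-tree plugin; it is not the Polly implementation, and the MyInlineHint / my-inline-hint names are invented for illustration.

// Hypothetical plugin showing how a new-PM CGSCC pass is exposed under a
// textual pipeline name (compare CGSCC_PASS("polly-inline", ...) in
// PollyPasses.def and the registerPipelineParsingCallback call above).
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Passes/PassPlugin.h"

using namespace llvm;

namespace {
struct MyInlineHintPass : PassInfoMixin<MyInlineHintPass> {
  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                        LazyCallGraph &CG, CGSCCUpdateResult &UR) {
    // Mirror the ScopInliner's guard: only trivial SCCs are considered, so
    // mutually recursive functions are never marked for inlining.
    if (C.size() != 1)
      return PreservedAnalyses::all();
    Function &F = C.begin()->getFunction();
    if (F.isDeclaration())
      return PreservedAnalyses::all();
    F.addFnAttr(Attribute::AlwaysInline);
    return PreservedAnalyses::none();
  }
};
} // namespace

// Entry point so that, e.g., `opt -load-pass-plugin=MyInlineHint.so
// -passes='cgscc(my-inline-hint)'` can find the pass by name.
extern "C" LLVM_ATTRIBUTE_WEAK PassPluginLibraryInfo llvmGetPassPluginInfo() {
  return {LLVM_PLUGIN_API_VERSION, "MyInlineHint", "v0.1", [](PassBuilder &PB) {
            PB.registerPipelineParsingCallback(
                [](StringRef Name, CGSCCPassManager &CGPM,
                   ArrayRef<PassBuilder::PipelineElement>) {
                  if (Name != "my-inline-hint")
                    return false;
                  CGPM.addPass(MyInlineHintPass());
                  return true;
                });
          }};
}

Polly registers its parser statically through registerPollyPasses rather than a plugin entry point, so the tests can simply pass -passes='cgscc(polly-inline),function(print<polly-function-scops>)' to opt, as the updated RUN lines do.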
diff --git a/polly/test/ScopInliner/invariant-load-func.ll b/polly/test/ScopInliner/invariant-load-func.ll index ffd2ec9..58c556a 100644 --- a/polly/test/ScopInliner/invariant-load-func.ll +++ b/polly/test/ScopInliner/invariant-load-func.ll @@ -1,12 +1,9 @@ -; RUN: opt %loadNPMPolly -polly-detect-full-functions -polly-scop-inliner \ -; RUN: -polly-invariant-load-hoisting '-passes=print<polly-function-scops>' -disable-output < %s | FileCheck %s +; RUN: opt %loadNPMPolly -polly-detect-full-functions -polly-invariant-load-hoisting '-passes=cgscc(polly-inline),function(print<polly-function-scops>)' -disable-output < %s 2>&1 | FileCheck %s ; Check that we inline a function that requires invariant load hoisting ; correctly. ; CHECK: Max Loop Depth: 2 -; REQUIRES: pollyacc - ; void to_be_inlined(int A[], int *begin, int *end) { ; for(int i = *begin; i < *end; i++) { diff --git a/polly/test/ScopInliner/simple-inline-loop.ll b/polly/test/ScopInliner/simple-inline-loop.ll index a5e3483..f12798a 100644 --- a/polly/test/ScopInliner/simple-inline-loop.ll +++ b/polly/test/ScopInliner/simple-inline-loop.ll @@ -1,5 +1,4 @@ -; RUN: opt %loadPolly -polly-detect-full-functions -polly-scop-inliner \ -; RUN: -polly-print-scops -disable-output < %s | FileCheck %s +; RUN: opt %loadNPMPolly -polly-detect-full-functions '-passes=cgscc(polly-inline),function(print<polly-function-scops>)' -disable-output < %s 2>&1 | FileCheck %s ; Check that we get the 2 nested loops by inlining `to_be_inlined` into ; `inline_site`. diff --git a/utils/bazel/llvm-project-overlay/mlir/python/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/python/BUILD.bazel index 0a84bd2..43613ad 100644 --- a/utils/bazel/llvm-project-overlay/mlir/python/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/mlir/python/BUILD.bazel @@ -1010,6 +1010,38 @@ filegroup( ) ##---------------------------------------------------------------------------## +# Shard dialect. +##---------------------------------------------------------------------------## + +gentbl_filegroup( + name = "ShardOpsPyGen", + tbl_outs = { + "mlir/dialects/_shard_enum_gen.py": [ + "-gen-python-enum-bindings", + "-bind-dialect=shard", + ], + "mlir/dialects/_shard_ops_gen.py": [ + "-gen-python-op-bindings", + "-bind-dialect=shard", + ], + }, + tblgen = "//mlir:mlir-tblgen", + td_file = "mlir/dialects/ShardOps.td", + deps = [ + "//mlir:OpBaseTdFiles", + "//mlir:ShardTdFiles", + ], +) + +filegroup( + name = "ShardOpsPyFiles", + srcs = [ + "mlir/dialects/shard.py", + ":ShardOpsPyGen", + ], +) + +##---------------------------------------------------------------------------## # Shape dialect. ##---------------------------------------------------------------------------##
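For reference, the offload/test/ompt updates earlier in this patch rewrite the FileCheck expectations from raw integers (optype=1..4, endpoint=1/2, kind=1..4) to the corresponding OMPT enumerator spellings (ompt_target_data_alloc, ompt_scope_begin, ompt_target, and so on), so the CHECK lines read as symbolic names rather than magic numbers. Below is a sketch of the kind of helper a test callback could use to print those spellings; it assumes the standard enumerators from <omp-tools.h> and is not the actual callbacks.h support code used by these tests.

// Hypothetical helpers that turn OMPT enumerators into the spellings the
// updated CHECK lines expect.
#include <omp-tools.h>

static const char *data_op_name(ompt_target_data_op_t op) {
  switch (op) {
  case ompt_target_data_alloc:
    return "ompt_target_data_alloc";
  case ompt_target_data_transfer_to_device:
    return "ompt_target_data_transfer_to_device";
  case ompt_target_data_transfer_from_device:
    return "ompt_target_data_transfer_from_device";
  case ompt_target_data_delete:
    return "ompt_target_data_delete";
  default:
    return "ompt_target_data_op_unknown";
  }
}

static const char *endpoint_name(ompt_scope_endpoint_t ep) {
  return ep == ompt_scope_begin ? "ompt_scope_begin" : "ompt_scope_end";
}

With helpers of this shape, a data-op callback would print, for example, optype=ompt_target_data_transfer_to_device where it previously printed optype=2, matching the revised expectations.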