Diffstat (limited to 'llvm')
-rw-r--r--  llvm/docs/HowToReleaseLLVM.rst | 55
-rw-r--r--  llvm/docs/PDB/HashTable.rst | 4
-rw-r--r--  llvm/include/llvm/Analysis/MemoryProfileInfo.h | 8
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsNVVM.td | 139
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsSPIRV.td | 8
-rw-r--r--  llvm/lib/Analysis/MemoryProfileInfo.cpp | 28
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FrameLowering.cpp | 357
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 64
-rw-r--r--  llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp | 3
-rw-r--r--  llvm/lib/Target/AArch64/AArch64RegisterInfo.td | 11
-rw-r--r--  llvm/lib/Target/AArch64/AArch64Subtarget.cpp | 19
-rw-r--r--  llvm/lib/Target/AArch64/AArch64Subtarget.h | 2
-rw-r--r--  llvm/lib/Target/AArch64/SMEInstrFormats.td | 14
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPU.td | 21
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNSubtarget.h | 4
-rw-r--r--  llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp | 10
-rw-r--r--  llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp | 3
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTX.h | 1
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 76
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelLowering.h | 5
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 54
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 97
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp | 134
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp | 192
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVUtils.cpp | 6
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVUtils.h | 3
-rw-r--r--  llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp | 1
-rw-r--r--  llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp | 49
-rw-r--r--  llvm/lib/Target/X86/X86InstrAVX512.td | 90
-rw-r--r--  llvm/lib/TargetParser/TargetParser.cpp | 1
-rw-r--r--  llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp | 9
-rw-r--r--  llvm/lib/Transforms/Utils/SCCPSolver.cpp | 39
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyCFG.cpp | 149
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 214
-rw-r--r--  llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 199
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanTransforms.h | 13
-rw-r--r--  llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir | 1009
-rw-r--r--  llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll | 11
-rw-r--r--  llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll | 13
-rw-r--r--  llvm/test/CodeGen/AMDGPU/pal-metadata-3.6-dvgpr.ll | 204
-rw-r--r--  llvm/test/CodeGen/AMDGPU/pal-metadata-3.6.ll | 13
-rw-r--r--  llvm/test/CodeGen/NVPTX/convert-sm103a.ll | 297
-rw-r--r--  llvm/test/CodeGen/NVPTX/wmma-ptx87-sm120a.py | 2
-rw-r--r--  llvm/test/CodeGen/NVPTX/wmma.py | 115
-rw-r--r--  llvm/test/CodeGen/SPIRV/hlsl-resources/test_counters.ll | 65
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir | 14
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/legalize-undef-vec-scaling.mir | 32
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec256.mir | 23
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec512.mir | 23
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier.mir | 77
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec256.mir | 23
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec512.mir | 23
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/select-freeze.mir | 77
-rw-r--r--  llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll | 24
-rw-r--r--  llvm/test/ThinLTO/X86/memprof-basic.ll | 5
-rw-r--r--  llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll | 4
-rw-r--r--  llvm/test/Transforms/PGOProfile/memprof.ll | 19
-rw-r--r--  llvm/test/Transforms/SCCP/relax-range-checks.ll | 24
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll | 12
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/ext-used-scalar-different-bitwidth.ll | 4
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll | 32
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll | 55
-rw-r--r--  llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll | 12
-rw-r--r--  llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll | 213
-rw-r--r--  llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s | 8
-rw-r--r--  llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2.s | 4
-rw-r--r--  llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2vl.s | 8
-rw-r--r--  llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2.s | 4
-rw-r--r--  llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2vl.s | 8
-rw-r--r--  llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2.s | 4
-rw-r--r--  llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2vl.s | 8
-rw-r--r--  llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2.s | 4
-rw-r--r--  llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2vl.s | 8
-rw-r--r--  llvm/tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp | 6
-rw-r--r--  llvm/unittests/Analysis/MemoryProfileInfoTest.cpp | 21
77 files changed, 2545 insertions, 2053 deletions
diff --git a/llvm/docs/HowToReleaseLLVM.rst b/llvm/docs/HowToReleaseLLVM.rst
index f3792e3..1795d3a 100644
--- a/llvm/docs/HowToReleaseLLVM.rst
+++ b/llvm/docs/HowToReleaseLLVM.rst
@@ -116,13 +116,11 @@ Branch the Git trunk using the following procedure:
#. Bump the version in trunk to N.0.0git with the script in
``llvm/utils/release/bump-version.py``, and tag the commit with llvmorg-N-init.
- If ``X`` is the version to be released, then ``N`` is ``X + 1``.
+ If ``X`` is the version to be released, then ``N`` is ``X + 1``. ::
-::
-
- $ git tag -sa llvmorg-N-init
+ $ git tag -sa llvmorg-N-init
-4. Clear the release notes in trunk with the script in
+#. Clear the release notes in trunk with the script in
``llvm/utils/release/clear-release-notes.py``.
#. Create the release branch from the last known good revision from before the
@@ -145,10 +143,12 @@ Tag release candidates:
$ git tag -sa llvmorg-X.Y.Z-rcN
The pre-packaged source tarballs will be automatically generated via the
-"Release Sources" workflow on GitHub. This workflow will create an artifact
-containing all the release tarballs and the artifact attestation. The
-Release Manager should download the artifact, verify the tarballs, sign them,
-and then upload them to the release page.
+`Release Sources
+<https://github.com/llvm/llvm-project/actions/workflows/release-sources.yml>`_
+workflow on GitHub. This workflow will create an artifact containing all the
+release tarballs and the artifact attestation. The Release Manager should
+download the artifact, verify the tarballs, sign them, and then upload them to
+the release page.
::
@@ -217,8 +217,9 @@ consistently validated and released binaries for their targets/OSs. To contact
them, you should post on the `Discourse forums (Project
Infrastructure - Release Testers). <https://discourse.llvm.org/c/infrastructure/release-testers/66>`_
-The official testers list is in the file ``RELEASE_TESTERS.TXT``, in the ``LLVM``
-repository.
+The official testers list is in the file `RELEASE_TESTERS.TXT
+<https://github.com/llvm/llvm-project/blob/main/llvm/RELEASE_TESTERS.TXT>`_, in
+the LLVM repository.
Community Testing
-----------------
@@ -276,7 +277,8 @@ from the Milestone. Debugging can continue, but on trunk.
Backport Requests
-----------------
-Instructions for requesting a backport to a stable branch can be found :doc:`here <GitHub>`.
+Instructions for requesting a backport to a stable branch can be found
+:ref:`here <backporting>`.
Triaging Bug Reports for Releases
---------------------------------
@@ -301,26 +303,19 @@ This section describes how to triage bug reports:
using the /cherry-pick or /branch comments if this has not been done already.
#. If a bug has been fixed and has a pull request created for backporting it,
- then update its status to "Needs Review" and notify a knowledgeable reviewer.
- Usually you will want to notify the person who approved the patch in Phabricator,
- but you may use your best judgement on who a good reviewer would be. Once
- you have identified the reviewer(s), assign the issue to them and mention
- them (i.e @username) in a comment and ask them if the patch is safe to backport.
- You should also review the bug yourself to ensure that it meets the requirements
- for committing to the release branch.
+ then update its status to "Needs Review" and notify a knowledgeable
+ reviewer. Usually you will want to notify the person who approved the
+ patch, but you may use your best judgement on who a good reviewer would be.
+ Once you have identified the reviewer(s), assign the issue to them and
+ mention them (i.e @username) in a comment and ask them if the patch is safe
+ to backport. You should also review the bug yourself to ensure that it
+ meets the requirements for committing to the release branch.
#. Once a bug has been reviewed, add the release:reviewed label and update the
issue's status to "Needs Merge". Check the pull request associated with the
issue. If all the tests pass, then the pull request can be merged. If not,
then add a comment on the issue asking someone to take a look at the failures.
-#. Once the pull request has been merged push it to the official release branch
- with the script ``llvm/utils/git/sync-release-repo.sh``.
-
- Then add a comment to the issue stating that the fix has been merged along with
- the git hashes from the release branch. Add the release:merged label to the issue
- and close it.
-
Release Patch Rules
-------------------
@@ -364,9 +359,8 @@ Update Documentation
Review the documentation in the release branch and ensure that it is up
to date. The "Release Notes" must be updated to reflect new features, bug
fixes, new known issues, and changes in the list of supported platforms.
-The "Getting Started Guide" should be updated to reflect the new release
-version number tag available from Subversion and changes in basic system
-requirements.
+The :doc:`GettingStarted` page should be updated to reflect the new release
+version number tag and changes in basic system requirements.
.. _tag:
@@ -386,7 +380,8 @@ Update the LLVM Website
The website must be updated before the release announcement is sent out. Here
is what to do:
-#. Check out the ``www-releases`` module from GitHub.
+#. Check out the `www-releases <https://github.com/llvm/www-releases>`_ repo
+ from GitHub.
#. Create a new sub-directory ``X.Y.Z`` in the releases directory.
diff --git a/llvm/docs/PDB/HashTable.rst b/llvm/docs/PDB/HashTable.rst
index 581ec59..7420510 100644
--- a/llvm/docs/PDB/HashTable.rst
+++ b/llvm/docs/PDB/HashTable.rst
@@ -17,8 +17,8 @@ a consumer to read a list of values and reconstruct the hash table on the fly.
The serialization format supports hash tables of arbitrarily large size and
capacity, as well as value types and hash functions. The only supported key
value type is a uint32. The only requirement is that the producer and consumer
-agree on the hash function. As such, the hash function can is not discussed
-further in this document, it is assumed that for a particular instance of a PDB
+agree on the hash function. As such, the hash function is not discussed
+further in this document. It is assumed that for a particular instance of a PDB
file hash table, the appropriate hash function is being used.
On-Disk Format
diff --git a/llvm/include/llvm/Analysis/MemoryProfileInfo.h b/llvm/include/llvm/Analysis/MemoryProfileInfo.h
index 571caf9..be690a4 100644
--- a/llvm/include/llvm/Analysis/MemoryProfileInfo.h
+++ b/llvm/include/llvm/Analysis/MemoryProfileInfo.h
@@ -59,6 +59,14 @@ LLVM_ABI std::string getAllocTypeAttributeString(AllocationType Type);
/// True if the AllocTypes bitmask contains just a single type.
LLVM_ABI bool hasSingleAllocType(uint8_t AllocTypes);
+/// Removes any existing "ambiguous" memprof attribute. Called before we apply a
+/// specific allocation type such as "cold", "notcold", or "hot".
+LLVM_ABI void removeAnyExistingAmbiguousAttribute(CallBase *CB);
+
+/// Adds an "ambiguous" memprof attribute to call with a matched allocation
+/// profile but that we haven't yet been able to disambiguate.
+LLVM_ABI void addAmbiguousAttribute(CallBase *CB);
+
/// Class to build a trie of call stack contexts for a particular profiled
/// allocation call, along with their associated allocation types.
/// The allocation will be at the root of the trie, which is then used to
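
(Aside: the two helpers declared above are thin wrappers around a string function
attribute, "memprof"="ambiguous". A minimal usage sketch, not part of this patch;
tagAllocation and its Disambiguated flag are hypothetical names:

    #include "llvm/Analysis/MemoryProfileInfo.h"
    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    // Sketch: mark an allocation call as ambiguous, or clear the marker once a
    // specific allocation type ("cold"/"notcold"/"hot") is about to be applied.
    void tagAllocation(CallBase *Alloc, bool Disambiguated) {
      if (Disambiguated)
        memprof::removeAnyExistingAmbiguousAttribute(Alloc);
      else
        memprof::addAmbiguousAttribute(Alloc);
    }
)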
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index 9cfab26..3af1750 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -272,6 +272,10 @@ class WMMA_REGS<string Geom, string Frag, string PtxEltType, bit IsSparse = fals
!eq(gft,"m16n8k16:d:f32") : !listsplat(llvm_float_ty, 4),
!eq(gft,"m16n8k4:c:f32") : !listsplat(llvm_float_ty, 4),
!eq(gft,"m16n8k4:d:f32") : !listsplat(llvm_float_ty, 4),
+ !eq(gft,"m16n8k32:c:f16") : !listsplat(llvm_v2f16_ty, 2),
+ !eq(gft,"m16n8k32:c:f32") : !listsplat(llvm_float_ty, 4),
+ !eq(gft,"m16n8k32:d:f16") : !listsplat(llvm_v2f16_ty, 2),
+ !eq(gft,"m16n8k32:d:f32") : !listsplat(llvm_float_ty, 4),
// wmma fp16 -> fp16/fp32 @ m16n16k16/m8n32k16/m32n8k16
// All other supported geometries use the same fragment format for f32 and
@@ -298,6 +302,21 @@ class WMMA_REGS<string Geom, string Frag, string PtxEltType, bit IsSparse = fals
!eq(gft,"m8n8k4:c:f64") : !listsplat(llvm_double_ty, 2),
!eq(gft,"m8n8k4:d:f64") : !listsplat(llvm_double_ty, 2),
+ !eq(gft,"m16n8k4:a:f64") : !listsplat(llvm_double_ty, 2),
+ !eq(gft,"m16n8k4:b:f64") : [llvm_double_ty],
+ !eq(gft,"m16n8k4:c:f64") : !listsplat(llvm_double_ty, 4),
+ !eq(gft,"m16n8k4:d:f64") : !listsplat(llvm_double_ty, 4),
+
+ !eq(gft,"m16n8k8:a:f64") : !listsplat(llvm_double_ty, 4),
+ !eq(gft,"m16n8k8:b:f64") : !listsplat(llvm_double_ty, 2),
+ !eq(gft,"m16n8k8:c:f64") : !listsplat(llvm_double_ty, 4),
+ !eq(gft,"m16n8k8:d:f64") : !listsplat(llvm_double_ty, 4),
+
+ !eq(gft,"m16n8k16:a:f64") : !listsplat(llvm_double_ty, 8),
+ !eq(gft,"m16n8k16:b:f64") : !listsplat(llvm_double_ty, 4),
+ !eq(gft,"m16n8k16:c:f64") : !listsplat(llvm_double_ty, 4),
+ !eq(gft,"m16n8k16:d:f64") : !listsplat(llvm_double_ty, 4),
+
// wmma bf16 -> s32 @ m16n16k16/m8n32k16/m32n8k16
!eq(gft,"m16n16k16:a:bf16") : !listsplat(llvm_i32_ty, 4),
!eq(gft,"m16n16k16:b:bf16") : !listsplat(llvm_i32_ty, 4),
@@ -378,6 +397,26 @@ class WMMA_REGS<string Geom, string Frag, string PtxEltType, bit IsSparse = fals
!eq(gft,"m16n8k64:c:s32") : !listsplat(llvm_i32_ty, 4),
!eq(gft,"m16n8k64:d:s32") : !listsplat(llvm_i32_ty, 4),
+ // mma e4m3/e5m2 -> f16/f32 @ m16n8k16
+ !eq(gft,"m16n8k16:a:e4m3") : !listsplat(llvm_i32_ty, 2),
+ !eq(gft,"m16n8k16:a:e5m2") : !listsplat(llvm_i32_ty, 2),
+ !eq(gft,"m16n8k16:b:e4m3") : [llvm_i32_ty],
+ !eq(gft,"m16n8k16:b:e5m2") : [llvm_i32_ty],
+ // mma e4m3/e5m2/e3m2/e2m3/e2m1 -> f32 @ m16n8k32
+ !eq(gft,"m16n8k32:a:e4m3") : !listsplat(llvm_i32_ty, 4),
+ !eq(gft,"m16n8k32:a:e5m2") : !listsplat(llvm_i32_ty, 4),
+ !eq(gft,"m16n8k32:a:e3m2") : !listsplat(llvm_i32_ty, 4),
+ !eq(gft,"m16n8k32:a:e2m3") : !listsplat(llvm_i32_ty, 4),
+ !eq(gft,"m16n8k32:a:e2m1") : !listsplat(llvm_i32_ty, 4),
+ !eq(gft,"m16n8k32:b:e4m3") : !listsplat(llvm_i32_ty, 2),
+ !eq(gft,"m16n8k32:b:e5m2") : !listsplat(llvm_i32_ty, 2),
+ !eq(gft,"m16n8k32:b:e3m2") : !listsplat(llvm_i32_ty, 2),
+ !eq(gft,"m16n8k32:b:e2m3") : !listsplat(llvm_i32_ty, 2),
+ !eq(gft,"m16n8k32:b:e2m1") : !listsplat(llvm_i32_ty, 2),
+ // mma e2m1 -> f32 @m16n8k64
+ !eq(gft,"m16n8k64:a:e2m1") : !listsplat(llvm_i32_ty, 4),
+ !eq(gft,"m16n8k64:b:e2m1") : !listsplat(llvm_i32_ty, 2),
+
// wmma/mma b1 -> s32 @ m8n8k128(b1)
!eq(gft,"m8n8k128:a:b1") : [llvm_i32_ty],
!eq(gft,"m8n8k128:b:b1") : [llvm_i32_ty],
@@ -468,7 +507,7 @@ class WMMA_NAME<string ALayout, string BLayout, int Satfinite, string Rnd, strin
# !if(Satfinite, "_satfinite", "");
}
-class MMA_NAME<string ALayout, string BLayout, int Satfinite, string b1op,
+class MMA_NAME<string ALayout, string BLayout, int Satfinite, string b1op, string Kind,
WMMA_REGS A, WMMA_REGS B, WMMA_REGS C, WMMA_REGS D> {
string signature = MMA_SIGNATURE<A, B, C, D>.ret;
string record = "int_nvvm_mma"
@@ -476,6 +515,7 @@ class MMA_NAME<string ALayout, string BLayout, int Satfinite, string b1op,
# "_" # A.geom
# "_" # ALayout
# "_" # BLayout
+ # !if(!ne(Kind, ""), !strconcat("_", !subst("::", "_", Kind)), "")
# !if(Satfinite, "_satfinite", "")
# signature;
}
@@ -601,7 +641,7 @@ class NVVM_MMA_OPS {
["m16n8k16", "m16n8k8"],
["bf16"], [], ["f32"], []>.ret;
list<list<WMMA_REGS>> f64_mma_ops = MMA_OPS<
- ["m8n8k4"],
+ ["m8n8k4", "m16n8k4", "m16n8k8", "m16n8k16"],
["f64"], [], ["f64"], []>.ret;
list<list<WMMA_REGS>> fp_mma_ops = MMA_OPS<
["m8n8k4", "m16n8k8", "m16n8k16"],
@@ -609,6 +649,18 @@ class NVVM_MMA_OPS {
list<list<WMMA_REGS>> int_mma_ops = MMA_OPS<
["m8n8k16", "m16n8k16", "m16n8k32"],
["s8", "u8"], ["s8", "u8"], ["s32"], []>.ret;
+ // m16n8k32 fp8 variants are intersected with f8f6f4 variants
+ // and processed there
+ list<list<WMMA_REGS>> fp8_mma_ops = MMA_OPS<
+ ["m16n8k16"],
+ ["e4m3", "e5m2"], ["e4m3", "e5m2"],
+ ["f16", "f32"], ["f16", "f32"]>.ret;
+ // it also contains e4m3/e5m2 from fp8 variants
+ list<list<WMMA_REGS>> f8f6f4_mma_ops = MMA_OPS<
+ ["m16n8k32"],
+ ["e4m3", "e5m2", "e3m2", "e2m3", "e2m1"],
+ ["e4m3", "e5m2", "e3m2", "e2m3", "e2m1"],
+ ["f16", "f32"], ["f16", "f32"]>.ret;
list<list<WMMA_REGS>> subint_mma_ops = MMA_OPS<
["m8n8k32", "m16n8k32", "m16n8k64"],
["s4", "u4"], ["s4", "u4"], ["s32"], []>.ret;
@@ -617,7 +669,8 @@ class NVVM_MMA_OPS {
["b1"], [], ["s32"], []>.ret;
list<list<WMMA_REGS>> all_mma_ops = !listconcat(
tf32_mma_ops, bf16_mma_ops, f64_mma_ops,
- fp_mma_ops, int_mma_ops, subint_mma_ops, bit_mma_ops);
+ fp_mma_ops, fp8_mma_ops, f8f6f4_mma_ops,
+ int_mma_ops, subint_mma_ops, bit_mma_ops);
list<list<WMMA_REGS>> bf16_mma_sp_ops = MMA_OPS<
["m16n8k16", "m16n8k32"],
@@ -770,7 +823,8 @@ class NVVM_MMA_B1OPS<list<WMMA_REGS> frags> {
// if NVVM_MMA_SUPPORTED<...>.ret then
// def : FOO<>; // The record will only be defined for supported ops.
//
-class NVVM_MMA_SUPPORTED<list<WMMA_REGS> frags, string layout_a, string layout_b, int satf> {
+class NVVM_MMA_SUPPORTED<list<WMMA_REGS> frags, string layout_a, string layout_b,
+ string kind, int satf> {
// MMA ops check both layouts.
string layout = layout_a # ":" # layout_b;
string a_type = frags[0].ptx_elt_type;
@@ -805,10 +859,31 @@ class NVVM_MMA_SUPPORTED<list<WMMA_REGS> frags, string layout_a, string layout_b
!or(!ne(a_type, b_type),
!ne(c_type, d_type))): false,
- // m16n8k8 requires C and D to be the same type.
- !and(!eq(geom, "m16n8k8"),
+ // m16n8k16/m16n8k32 requires C and D to be the same type
+ !and(!or(!eq(geom, "m16n8k16"),
+ !eq(geom, "m16n8k32")),
!ne(c_type, d_type)): false,
+ // Limit kind to valid types and geometries
+ !and(!ne(kind, ""),
+ !or(!ne(geom, "m16n8k32"),
+ !and(!ne(a_type, "e4m3"),
+ !ne(a_type, "e5m2"),
+ !ne(a_type, "e3m2"),
+ !ne(a_type, "e2m3"),
+ !ne(a_type, "e2m1")))): false,
+
+ // Limit m16n8k16/m16n8k32 with no kind to valid types
+ !and(!eq(kind, ""),
+ !or(!eq(geom, "m16n8k16"),
+ !eq(geom, "m16n8k32")),
+ !or(!eq(a_type, "e3m2"),
+ !eq(a_type, "e2m3"),
+ !eq(a_type, "e2m1"),
+ !eq(b_type, "e3m2"),
+ !eq(b_type, "e2m3"),
+ !eq(b_type, "e2m1"))): false,
+
// All other are OK.
true: true
);
@@ -882,9 +957,10 @@ class NVVM_MMA_SP_SUPPORTED<list<WMMA_REGS> frags, string metadata,
!eq(a_type, "tf32")),
!ne(a_type, b_type)): false,
- // m16n8k16 and m16n8k32 requires C and D to be the same type.
+ // m16n8k16, m16n8k32 and m16n8k64 requires C and D to be the same type.
!and(!or(!eq(geom, "m16n8k16"),
- !eq(geom, "m16n8k32")),
+ !eq(geom, "m16n8k32"),
+ !eq(geom, "m16n8k64")),
!ne(c_type, d_type)): false,
!and(!eq(kind, ""),
@@ -1493,6 +1569,18 @@ let TargetPrefix = "nvvm" in {
}
}
+ // RS rounding mode (Stochastic Rounding) conversions for f16x2, bf16x2 types
+ // The last i32 operand provides the random bits for the conversion
+ foreach relu = ["", "_relu"] in {
+ foreach satfinite = ["", "_satfinite"] in {
+ def int_nvvm_ff2f16x2_rs # relu # satfinite : NVVMBuiltin,
+ PureIntrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty]>;
+
+ def int_nvvm_ff2bf16x2_rs # relu # satfinite : NVVMBuiltin,
+ PureIntrinsic<[llvm_v2bf16_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty]>;
+ }
+ }
+
foreach satfinite = ["", "_satfinite"] in {
def int_nvvm_f2tf32_rna # satfinite : NVVMBuiltin,
PureIntrinsic<[llvm_i32_ty], [llvm_float_ty]>;
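
(Aside: each of the new ff2f16x2/ff2bf16x2 stochastic-rounding intrinsics above
takes two f32 operands plus an i32 supplying the random bits and returns a packed
two-element vector. A minimal IRBuilder sketch of emitting the base f16x2 variant;
emitFF2F16x2RS and its operands are placeholder names, and the intrinsic enum is
assumed to follow from the definition above:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/IntrinsicsNVPTX.h"
    using namespace llvm;

    // Sketch: build a call that converts two f32 values into a <2 x half>,
    // using RandomBits as the entropy source for stochastic rounding.
    Value *emitFF2F16x2RS(IRBuilder<> &B, Value *X, Value *Y, Value *RandomBits) {
      return B.CreateIntrinsic(Intrinsic::nvvm_ff2f16x2_rs, /*Types=*/{},
                               {X, Y, RandomBits});
    }
)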
@@ -1515,6 +1603,15 @@ let TargetPrefix = "nvvm" in {
PureIntrinsic<[llvm_v2f16_ty], [llvm_i16_ty]>;
}
}
+
+ // RS rounding mode (Stochastic Rounding) conversions for f8x4 types
+ // The last i32 operand provides the random bits for the conversion
+ foreach type = ["e4m3x4", "e5m2x4"] in {
+ foreach relu = ["", "_relu"] in {
+ def int_nvvm_f32x4_to_ # type # _rs # relu # _satfinite : NVVMBuiltin,
+ PureIntrinsic<[llvm_v4i8_ty], [llvm_v4f32_ty, llvm_i32_ty]>;
+ }
+ }
// FP4 conversions.
foreach relu = ["", "_relu"] in {
@@ -1524,6 +1621,13 @@ let TargetPrefix = "nvvm" in {
def int_nvvm_e2m1x2_to_f16x2_rn # relu : NVVMBuiltin,
PureIntrinsic<[llvm_v2f16_ty], [llvm_i16_ty]>;
}
+
+ // RS rounding mode (Stochastic Rounding) conversions for f4x4 type
+ // The last i32 operand provides the random bits for the conversion
+ foreach relu = ["", "_relu"] in {
+ def int_nvvm_f32x4_to_e2m1x4_rs # relu # _satfinite : NVVMBuiltin,
+ PureIntrinsic<[llvm_i16_ty], [llvm_v4f32_ty, llvm_i32_ty]>;
+ }
// FP6 conversions.
foreach type = ["e2m3x2", "e3m2x2"] in {
@@ -1535,6 +1639,15 @@ let TargetPrefix = "nvvm" in {
PureIntrinsic<[llvm_v2f16_ty], [llvm_i16_ty]>;
}
}
+
+ // RS rounding mode (Stochastic Rounding) conversions for f6x4 types
+ // The last i32 operand provides the random bits for the conversion
+ foreach type = ["e2m3x4", "e3m2x4"] in {
+ foreach relu = ["", "_relu"] in {
+ def int_nvvm_f32x4_to_ # type # _rs # relu # _satfinite : NVVMBuiltin,
+ PureIntrinsic<[llvm_v4i8_ty], [llvm_v4f32_ty, llvm_i32_ty]>;
+ }
+ }
// UE8M0x2 conversions.
foreach rmode = ["_rz", "_rp"] in {
@@ -2215,10 +2328,12 @@ foreach layout_a = ["row", "col"] in {
foreach satf = [0, 1] in {
foreach op = NVVM_MMA_OPS.all_mma_ops in {
foreach b1op = NVVM_MMA_B1OPS<op>.ret in {
- if NVVM_MMA_SUPPORTED<op, layout_a, layout_b, satf>.ret then {
- def MMA_NAME<layout_a, layout_b, satf, b1op, op[0], op[1], op[2], op[3]>.record
- : NVVM_MMA<op[0], op[1], op[2], op[3]>;
- }
+ foreach kind = ["", "kind::f8f6f4"] in {
+ if NVVM_MMA_SUPPORTED<op, layout_a, layout_b, kind, satf>.ret then {
+ def MMA_NAME<layout_a, layout_b, satf, b1op, kind, op[0], op[1], op[2], op[3]>.record
+ : NVVM_MMA<op[0], op[1], op[2], op[3]>;
+ }
+ } // kind
} // b1op
} // op
} // satf
diff --git a/llvm/include/llvm/IR/IntrinsicsSPIRV.td b/llvm/include/llvm/IR/IntrinsicsSPIRV.td
index 823c491..66e24fa 100644
--- a/llvm/include/llvm/IR/IntrinsicsSPIRV.td
+++ b/llvm/include/llvm/IR/IntrinsicsSPIRV.td
@@ -150,6 +150,14 @@ def int_spv_rsqrt : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty]
[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
llvm_i32_ty, llvm_ptr_ty],
[IntrNoMem]>;
+ def int_spv_resource_counterhandlefromimplicitbinding
+ : DefaultAttrsIntrinsic<[llvm_any_ty],
+ [llvm_any_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_spv_resource_counterhandlefrombinding
+ : DefaultAttrsIntrinsic<[llvm_any_ty],
+ [llvm_any_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
def int_spv_firstbituhigh : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_anyint_ty], [IntrNoMem]>;
def int_spv_firstbitshigh : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_anyint_ty], [IntrNoMem]>;
diff --git a/llvm/lib/Analysis/MemoryProfileInfo.cpp b/llvm/lib/Analysis/MemoryProfileInfo.cpp
index 0c1f8db..92a5b6f 100644
--- a/llvm/lib/Analysis/MemoryProfileInfo.cpp
+++ b/llvm/lib/Analysis/MemoryProfileInfo.cpp
@@ -54,6 +54,10 @@ cl::opt<unsigned> MinPercentMaxColdSize(
"memprof-min-percent-max-cold-size", cl::init(100), cl::Hidden,
cl::desc("Min percent of max cold bytes for critical cold context"));
+LLVM_ABI cl::opt<bool> MemProfUseAmbiguousAttributes(
+ "memprof-ambiguous-attributes", cl::init(true), cl::Hidden,
+ cl::desc("Apply ambiguous memprof attribute to ambiguous allocations"));
+
} // end namespace llvm
bool llvm::memprof::metadataIncludesAllContextSizeInfo() {
@@ -125,6 +129,26 @@ bool llvm::memprof::hasSingleAllocType(uint8_t AllocTypes) {
return NumAllocTypes == 1;
}
+void llvm::memprof::removeAnyExistingAmbiguousAttribute(CallBase *CB) {
+ if (!CB->hasFnAttr("memprof"))
+ return;
+ assert(CB->getFnAttr("memprof").getValueAsString() == "ambiguous");
+ CB->removeFnAttr("memprof");
+}
+
+void llvm::memprof::addAmbiguousAttribute(CallBase *CB) {
+ if (!MemProfUseAmbiguousAttributes)
+ return;
+ // We may have an existing ambiguous attribute if we are reanalyzing
+ // after inlining.
+ if (CB->hasFnAttr("memprof")) {
+ assert(CB->getFnAttr("memprof").getValueAsString() == "ambiguous");
+ } else {
+ auto A = llvm::Attribute::get(CB->getContext(), "memprof", "ambiguous");
+ CB->addFnAttr(A);
+ }
+}
+
void CallStackTrie::addCallStack(
AllocationType AllocType, ArrayRef<uint64_t> StackIds,
std::vector<ContextTotalSize> ContextSizeInfo) {
@@ -470,6 +494,9 @@ void CallStackTrie::addSingleAllocTypeAttribute(CallBase *CI, AllocationType AT,
StringRef Descriptor) {
auto AllocTypeString = getAllocTypeAttributeString(AT);
auto A = llvm::Attribute::get(CI->getContext(), "memprof", AllocTypeString);
+ // After inlining we may be able to convert an existing ambiguous allocation
+ // to an unambiguous one.
+ removeAnyExistingAmbiguousAttribute(CI);
CI->addFnAttr(A);
if (MemProfReportHintedSizes) {
std::vector<ContextTotalSize> ContextSizeInfo;
@@ -529,6 +556,7 @@ bool CallStackTrie::buildAndAttachMIBMetadata(CallBase *CI) {
assert(MIBCallStack.size() == 1 &&
"Should only be left with Alloc's location in stack");
CI->setMetadata(LLVMContext::MD_memprof, MDNode::get(Ctx, MIBNodes));
+ addAmbiguousAttribute(CI);
return true;
}
// If there exists corner case that CallStackTrie has one chain to leaf
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 4357264d..c76689f 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -345,12 +345,6 @@ static unsigned getStackHazardSize(const MachineFunction &MF) {
return MF.getSubtarget<AArch64Subtarget>().getStreamingHazardSize();
}
-/// Returns true if PPRs are spilled as ZPRs.
-static bool arePPRsSpilledAsZPR(const MachineFunction &MF) {
- return MF.getSubtarget().getRegisterInfo()->getSpillSize(
- AArch64::PPRRegClass) == 16;
-}
-
StackOffset
AArch64FrameLowering::getZPRStackSize(const MachineFunction &MF) const {
const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
@@ -1966,8 +1960,7 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
StrOpc = RPI.isPaired() ? AArch64::ST1B_2Z_IMM : AArch64::STR_ZXI;
break;
case RegPairInfo::PPR:
- StrOpc =
- Size == 16 ? AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO : AArch64::STR_PXI;
+ StrOpc = AArch64::STR_PXI;
break;
case RegPairInfo::VG:
StrOpc = AArch64::STRXui;
@@ -2178,8 +2171,7 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
LdrOpc = RPI.isPaired() ? AArch64::LD1B_2Z_IMM : AArch64::LDR_ZXI;
break;
case RegPairInfo::PPR:
- LdrOpc = Size == 16 ? AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO
- : AArch64::LDR_PXI;
+ LdrOpc = AArch64::LDR_PXI;
break;
case RegPairInfo::VG:
continue;
@@ -2286,9 +2278,7 @@ static std::optional<int> getLdStFrameID(const MachineInstr &MI,
// Returns true if the LDST MachineInstr \p MI is a PPR access.
static bool isPPRAccess(const MachineInstr &MI) {
- return MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO &&
- MI.getOpcode() != AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO &&
- AArch64::PPRRegClass.contains(MI.getOperand(0).getReg());
+ return AArch64::PPRRegClass.contains(MI.getOperand(0).getReg());
}
// Check if a Hazard slot is needed for the current function, and if so create
@@ -2390,12 +2380,6 @@ void AArch64FrameLowering::determineStackHazardSlot(
return;
}
- if (arePPRsSpilledAsZPR(MF)) {
- LLVM_DEBUG(dbgs() << "SplitSVEObjects is not supported with "
- "-aarch64-enable-zpr-predicate-spills");
- return;
- }
-
// If another calling convention is explicitly set FPRs can't be promoted to
// ZPR callee-saves.
if (!is_contained({CallingConv::C, CallingConv::Fast,
@@ -2519,14 +2503,6 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
continue;
}
- // Always save P4 when PPR spills are ZPR-sized and a predicate above p8 is
- // spilled. If all of p0-p3 are used as return values p4 is must be free
- // to reload p8-p15.
- if (RegInfo->getSpillSize(AArch64::PPRRegClass) == 16 &&
- AArch64::PPR_p8to15RegClass.contains(Reg)) {
- SavedRegs.set(AArch64::P4);
- }
-
// MachO's compact unwind format relies on all registers being stored in
// pairs.
// FIXME: the usual format is actually better if unwinding isn't needed.
@@ -2587,7 +2563,7 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
auto SpillSize = TRI->getSpillSize(*RC);
bool IsZPR = AArch64::ZPRRegClass.contains(Reg);
bool IsPPR = !IsZPR && AArch64::PPRRegClass.contains(Reg);
- if (IsZPR || (IsPPR && arePPRsSpilledAsZPR(MF)))
+ if (IsZPR)
ZPRCSStackSize += SpillSize;
else if (IsPPR)
PPRCSStackSize += SpillSize;
@@ -2902,7 +2878,7 @@ static SVEStackSizes determineSVEStackSizes(MachineFunction &MF,
StackTop += MFI.getObjectSize(FI);
StackTop = alignTo(StackTop, Alignment);
- assert(StackTop < std::numeric_limits<int64_t>::max() &&
+ assert(StackTop < (uint64_t)std::numeric_limits<int64_t>::max() &&
"SVE StackTop far too large?!");
int64_t Offset = -int64_t(StackTop);
@@ -2961,314 +2937,8 @@ static SVEStackSizes determineSVEStackSizes(MachineFunction &MF,
return SVEStack;
}
-/// Attempts to scavenge a register from \p ScavengeableRegs given the used
-/// registers in \p UsedRegs.
-static Register tryScavengeRegister(LiveRegUnits const &UsedRegs,
- BitVector const &ScavengeableRegs,
- Register PreferredReg) {
- if (PreferredReg != AArch64::NoRegister && UsedRegs.available(PreferredReg))
- return PreferredReg;
- for (auto Reg : ScavengeableRegs.set_bits()) {
- if (UsedRegs.available(Reg))
- return Reg;
- }
- return AArch64::NoRegister;
-}
-
-/// Propagates frame-setup/destroy flags from \p SourceMI to all instructions in
-/// \p MachineInstrs.
-static void propagateFrameFlags(MachineInstr &SourceMI,
- ArrayRef<MachineInstr *> MachineInstrs) {
- for (MachineInstr *MI : MachineInstrs) {
- if (SourceMI.getFlag(MachineInstr::FrameSetup))
- MI->setFlag(MachineInstr::FrameSetup);
- if (SourceMI.getFlag(MachineInstr::FrameDestroy))
- MI->setFlag(MachineInstr::FrameDestroy);
- }
-}
-
-/// RAII helper class for scavenging or spilling a register. On construction
-/// attempts to find a free register of class \p RC (given \p UsedRegs and \p
-/// AllocatableRegs), if no register can be found spills \p SpillCandidate to \p
-/// MaybeSpillFI to free a register. The free'd register is returned via the \p
-/// FreeReg output parameter. On destruction, if there is a spill, its previous
-/// value is reloaded. The spilling and scavenging is only valid at the
-/// insertion point \p MBBI, this class should _not_ be used in places that
-/// create or manipulate basic blocks, moving the expected insertion point.
-struct ScopedScavengeOrSpill {
- ScopedScavengeOrSpill(const ScopedScavengeOrSpill &) = delete;
- ScopedScavengeOrSpill(ScopedScavengeOrSpill &&) = delete;
-
- ScopedScavengeOrSpill(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- Register SpillCandidate, const TargetRegisterClass &RC,
- LiveRegUnits const &UsedRegs,
- BitVector const &AllocatableRegs,
- std::optional<int> *MaybeSpillFI,
- Register PreferredReg = AArch64::NoRegister)
- : MBB(MBB), MBBI(MBBI), RC(RC), TII(static_cast<const AArch64InstrInfo &>(
- *MF.getSubtarget().getInstrInfo())),
- TRI(*MF.getSubtarget().getRegisterInfo()) {
- FreeReg = tryScavengeRegister(UsedRegs, AllocatableRegs, PreferredReg);
- if (FreeReg != AArch64::NoRegister)
- return;
- assert(MaybeSpillFI && "Expected emergency spill slot FI information "
- "(attempted to spill in prologue/epilogue?)");
- if (!MaybeSpillFI->has_value()) {
- MachineFrameInfo &MFI = MF.getFrameInfo();
- *MaybeSpillFI = MFI.CreateSpillStackObject(TRI.getSpillSize(RC),
- TRI.getSpillAlign(RC));
- }
- FreeReg = SpillCandidate;
- SpillFI = MaybeSpillFI->value();
- TII.storeRegToStackSlot(MBB, MBBI, FreeReg, false, *SpillFI, &RC, &TRI,
- Register());
- }
-
- bool hasSpilled() const { return SpillFI.has_value(); }
-
- /// Returns the free register (found from scavenging or spilling a register).
- Register freeRegister() const { return FreeReg; }
-
- Register operator*() const { return freeRegister(); }
-
- ~ScopedScavengeOrSpill() {
- if (hasSpilled())
- TII.loadRegFromStackSlot(MBB, MBBI, FreeReg, *SpillFI, &RC, &TRI,
- Register());
- }
-
-private:
- MachineBasicBlock &MBB;
- MachineBasicBlock::iterator MBBI;
- const TargetRegisterClass &RC;
- const AArch64InstrInfo &TII;
- const TargetRegisterInfo &TRI;
- Register FreeReg = AArch64::NoRegister;
- std::optional<int> SpillFI;
-};
-
-/// Emergency stack slots for expanding SPILL_PPR_TO_ZPR_SLOT_PSEUDO and
-/// FILL_PPR_FROM_ZPR_SLOT_PSEUDO.
-struct EmergencyStackSlots {
- std::optional<int> ZPRSpillFI;
- std::optional<int> PPRSpillFI;
- std::optional<int> GPRSpillFI;
-};
-
-/// Registers available for scavenging (ZPR, PPR3b, GPR).
-struct ScavengeableRegs {
- BitVector ZPRRegs;
- BitVector PPR3bRegs;
- BitVector GPRRegs;
-};
-
-static bool isInPrologueOrEpilogue(const MachineInstr &MI) {
- return MI.getFlag(MachineInstr::FrameSetup) ||
- MI.getFlag(MachineInstr::FrameDestroy);
-}
-
-/// Expands:
-/// ```
-/// SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p0, %stack.0, 0
-/// ```
-/// To:
-/// ```
-/// $z0 = CPY_ZPzI_B $p0, 1, 0
-/// STR_ZXI $z0, $stack.0, 0
-/// ```
-/// While ensuring a ZPR ($z0 in this example) is free for the predicate (
-/// spilling if necessary).
-static void expandSpillPPRToZPRSlotPseudo(MachineBasicBlock &MBB,
- MachineInstr &MI,
- const TargetRegisterInfo &TRI,
- LiveRegUnits const &UsedRegs,
- ScavengeableRegs const &SR,
- EmergencyStackSlots &SpillSlots) {
- MachineFunction &MF = *MBB.getParent();
- auto *TII =
- static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
-
- ScopedScavengeOrSpill ZPredReg(
- MF, MBB, MI, AArch64::Z0, AArch64::ZPRRegClass, UsedRegs, SR.ZPRRegs,
- isInPrologueOrEpilogue(MI) ? nullptr : &SpillSlots.ZPRSpillFI);
-
- SmallVector<MachineInstr *, 2> MachineInstrs;
- const DebugLoc &DL = MI.getDebugLoc();
- MachineInstrs.push_back(BuildMI(MBB, MI, DL, TII->get(AArch64::CPY_ZPzI_B))
- .addReg(*ZPredReg, RegState::Define)
- .add(MI.getOperand(0))
- .addImm(1)
- .addImm(0)
- .getInstr());
- MachineInstrs.push_back(BuildMI(MBB, MI, DL, TII->get(AArch64::STR_ZXI))
- .addReg(*ZPredReg)
- .add(MI.getOperand(1))
- .addImm(MI.getOperand(2).getImm())
- .setMemRefs(MI.memoperands())
- .getInstr());
- propagateFrameFlags(MI, MachineInstrs);
-}
-
-/// Expands:
-/// ```
-/// $p0 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0
-/// ```
-/// To:
-/// ```
-/// $z0 = LDR_ZXI %stack.0, 0
-/// $p0 = PTRUE_B 31, implicit $vg
-/// $p0 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
-/// ```
-/// While ensuring a ZPR ($z0 in this example) is free for the predicate (
-/// spilling if necessary). If the status flags are in use at the point of
-/// expansion they are preserved (by moving them to/from a GPR). This may cause
-/// an additional spill if no GPR is free at the expansion point.
-static bool expandFillPPRFromZPRSlotPseudo(
- MachineBasicBlock &MBB, MachineInstr &MI, const TargetRegisterInfo &TRI,
- LiveRegUnits const &UsedRegs, ScavengeableRegs const &SR,
- MachineInstr *&LastPTrue, EmergencyStackSlots &SpillSlots) {
- MachineFunction &MF = *MBB.getParent();
- auto *TII =
- static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
-
- ScopedScavengeOrSpill ZPredReg(
- MF, MBB, MI, AArch64::Z0, AArch64::ZPRRegClass, UsedRegs, SR.ZPRRegs,
- isInPrologueOrEpilogue(MI) ? nullptr : &SpillSlots.ZPRSpillFI);
-
- ScopedScavengeOrSpill PredReg(
- MF, MBB, MI, AArch64::P0, AArch64::PPR_3bRegClass, UsedRegs, SR.PPR3bRegs,
- isInPrologueOrEpilogue(MI) ? nullptr : &SpillSlots.PPRSpillFI,
- /*PreferredReg=*/
- LastPTrue ? LastPTrue->getOperand(0).getReg() : AArch64::NoRegister);
-
- // Elide NZCV spills if we know it is not used.
- bool IsNZCVUsed = !UsedRegs.available(AArch64::NZCV);
- std::optional<ScopedScavengeOrSpill> NZCVSaveReg;
- if (IsNZCVUsed)
- NZCVSaveReg.emplace(
- MF, MBB, MI, AArch64::X0, AArch64::GPR64RegClass, UsedRegs, SR.GPRRegs,
- isInPrologueOrEpilogue(MI) ? nullptr : &SpillSlots.GPRSpillFI);
- SmallVector<MachineInstr *, 4> MachineInstrs;
- const DebugLoc &DL = MI.getDebugLoc();
- MachineInstrs.push_back(BuildMI(MBB, MI, DL, TII->get(AArch64::LDR_ZXI))
- .addReg(*ZPredReg, RegState::Define)
- .add(MI.getOperand(1))
- .addImm(MI.getOperand(2).getImm())
- .setMemRefs(MI.memoperands())
- .getInstr());
- if (IsNZCVUsed)
- MachineInstrs.push_back(
- BuildMI(MBB, MI, DL, TII->get(AArch64::MRS))
- .addReg(NZCVSaveReg->freeRegister(), RegState::Define)
- .addImm(AArch64SysReg::NZCV)
- .addReg(AArch64::NZCV, RegState::Implicit)
- .getInstr());
-
- // Reuse previous ptrue if we know it has not been clobbered.
- if (LastPTrue) {
- assert(*PredReg == LastPTrue->getOperand(0).getReg());
- LastPTrue->moveBefore(&MI);
- } else {
- LastPTrue = BuildMI(MBB, MI, DL, TII->get(AArch64::PTRUE_B))
- .addReg(*PredReg, RegState::Define)
- .addImm(31);
- }
- MachineInstrs.push_back(LastPTrue);
- MachineInstrs.push_back(
- BuildMI(MBB, MI, DL, TII->get(AArch64::CMPNE_PPzZI_B))
- .addReg(MI.getOperand(0).getReg(), RegState::Define)
- .addReg(*PredReg)
- .addReg(*ZPredReg)
- .addImm(0)
- .addReg(AArch64::NZCV, RegState::ImplicitDefine)
- .getInstr());
- if (IsNZCVUsed)
- MachineInstrs.push_back(BuildMI(MBB, MI, DL, TII->get(AArch64::MSR))
- .addImm(AArch64SysReg::NZCV)
- .addReg(NZCVSaveReg->freeRegister())
- .addReg(AArch64::NZCV, RegState::ImplicitDefine)
- .getInstr());
-
- propagateFrameFlags(MI, MachineInstrs);
- return PredReg.hasSpilled();
-}
-
-/// Expands all FILL_PPR_FROM_ZPR_SLOT_PSEUDO and SPILL_PPR_TO_ZPR_SLOT_PSEUDO
-/// operations within the MachineBasicBlock \p MBB.
-static bool expandSMEPPRToZPRSpillPseudos(MachineBasicBlock &MBB,
- const TargetRegisterInfo &TRI,
- ScavengeableRegs const &SR,
- EmergencyStackSlots &SpillSlots) {
- LiveRegUnits UsedRegs(TRI);
- UsedRegs.addLiveOuts(MBB);
- bool HasPPRSpills = false;
- MachineInstr *LastPTrue = nullptr;
- for (MachineInstr &MI : make_early_inc_range(reverse(MBB))) {
- UsedRegs.stepBackward(MI);
- switch (MI.getOpcode()) {
- case AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO:
- if (LastPTrue &&
- MI.definesRegister(LastPTrue->getOperand(0).getReg(), &TRI))
- LastPTrue = nullptr;
- HasPPRSpills |= expandFillPPRFromZPRSlotPseudo(MBB, MI, TRI, UsedRegs, SR,
- LastPTrue, SpillSlots);
- MI.eraseFromParent();
- break;
- case AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO:
- expandSpillPPRToZPRSlotPseudo(MBB, MI, TRI, UsedRegs, SR, SpillSlots);
- MI.eraseFromParent();
- [[fallthrough]];
- default:
- LastPTrue = nullptr;
- break;
- }
- }
-
- return HasPPRSpills;
-}
-
void AArch64FrameLowering::processFunctionBeforeFrameFinalized(
MachineFunction &MF, RegScavenger *RS) const {
-
- AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
- const TargetSubtargetInfo &TSI = MF.getSubtarget();
- const TargetRegisterInfo &TRI = *TSI.getRegisterInfo();
-
- // If predicates spills are 16-bytes we may need to expand
- // SPILL_PPR_TO_ZPR_SLOT_PSEUDO/FILL_PPR_FROM_ZPR_SLOT_PSEUDO.
- if (AFI->hasStackFrame() && TRI.getSpillSize(AArch64::PPRRegClass) == 16) {
- auto ComputeScavengeableRegisters = [&](unsigned RegClassID) {
- BitVector Regs = TRI.getAllocatableSet(MF, TRI.getRegClass(RegClassID));
- assert(Regs.count() > 0 && "Expected scavengeable registers");
- return Regs;
- };
-
- ScavengeableRegs SR{};
- SR.ZPRRegs = ComputeScavengeableRegisters(AArch64::ZPRRegClassID);
- // Only p0-7 are possible as the second operand of cmpne (needed for fills).
- SR.PPR3bRegs = ComputeScavengeableRegisters(AArch64::PPR_3bRegClassID);
- SR.GPRRegs = ComputeScavengeableRegisters(AArch64::GPR64RegClassID);
-
- EmergencyStackSlots SpillSlots;
- for (MachineBasicBlock &MBB : MF) {
- // In the case we had to spill a predicate (in the range p0-p7) to reload
- // a predicate (>= p8), additional spill/fill pseudos will be created.
- // These need an additional expansion pass. Note: There will only be at
- // most two expansion passes, as spilling/filling a predicate in the range
- // p0-p7 never requires spilling another predicate.
- for (int Pass = 0; Pass < 2; Pass++) {
- bool HasPPRSpills =
- expandSMEPPRToZPRSpillPseudos(MBB, TRI, SR, SpillSlots);
- assert((Pass == 0 || !HasPPRSpills) && "Did not expect PPR spills");
- if (!HasPPRSpills)
- break;
- }
- }
- }
-
- MachineFrameInfo &MFI = MF.getFrameInfo();
-
assert(getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown &&
"Upwards growing stack unsupported");
@@ -3279,6 +2949,9 @@ void AArch64FrameLowering::processFunctionBeforeFrameFinalized(
if (!MF.hasEHFunclets())
return;
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ auto *AFI = MF.getInfo<AArch64FunctionInfo>();
+
// Win64 C++ EH needs to allocate space for the catch objects in the fixed
// object area right next to the UnwindHelp object.
WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();
@@ -4280,18 +3953,10 @@ void AArch64FrameLowering::emitRemarks(
}
unsigned RegTy = StackAccess::AccessType::GPR;
- if (MFI.hasScalableStackID(FrameIdx)) {
- // SPILL_PPR_TO_ZPR_SLOT_PSEUDO and FILL_PPR_FROM_ZPR_SLOT_PSEUDO
- // spill/fill the predicate as a data vector (so are an FPR access).
- if (MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO &&
- MI.getOpcode() != AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO &&
- AArch64::PPRRegClass.contains(MI.getOperand(0).getReg())) {
- RegTy = StackAccess::PPR;
- } else
- RegTy = StackAccess::FPR;
- } else if (AArch64InstrInfo::isFpOrNEON(MI)) {
+ if (MFI.hasScalableStackID(FrameIdx))
+ RegTy = isPPRAccess(MI) ? StackAccess::PPR : StackAccess::FPR;
+ else if (AArch64InstrInfo::isFpOrNEON(MI))
RegTy = StackAccess::FPR;
- }
StackAccesses[ArrIdx].AccessTypes |= RegTy;
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 5a90da1..b8761d97 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2579,8 +2579,6 @@ unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) {
case AArch64::STZ2Gi:
case AArch64::STZGi:
case AArch64::TAGPstack:
- case AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO:
- case AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO:
return 2;
case AArch64::LD1B_D_IMM:
case AArch64::LD1B_H_IMM:
@@ -4387,8 +4385,6 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
MinOffset = -256;
MaxOffset = 254;
break;
- case AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO:
- case AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO:
case AArch64::LDR_ZXI:
case AArch64::STR_ZXI:
Scale = TypeSize::getScalable(16);
@@ -5098,33 +5094,31 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg)
.addImm(0)
.addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
+ } else if (Subtarget.hasZeroCycleRegMoveGPR64() &&
+ !Subtarget.hasZeroCycleRegMoveGPR32()) {
+ // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
+ MCRegister DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
+ &AArch64::GPR64spRegClass);
+ assert(DestRegX.isValid() && "Destination super-reg not valid");
+ MCRegister SrcRegX =
+ SrcReg == AArch64::WZR
+ ? AArch64::XZR
+ : TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
+ &AArch64::GPR64spRegClass);
+ assert(SrcRegX.isValid() && "Source super-reg not valid");
+ // This instruction is reading and writing X registers. This may upset
+ // the register scavenger and machine verifier, so we need to indicate
+ // that we are reading an undefined value from SrcRegX, but a proper
+ // value from SrcReg.
+ BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
+ .addReg(AArch64::XZR)
+ .addReg(SrcRegX, RegState::Undef)
+ .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
} else {
- if (Subtarget.hasZeroCycleRegMoveGPR64() &&
- !Subtarget.hasZeroCycleRegMoveGPR32()) {
- // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
- MCRegister DestRegX = TRI->getMatchingSuperReg(
- DestReg, AArch64::sub_32, &AArch64::GPR64spRegClass);
- assert(DestRegX.isValid() && "Destination super-reg not valid");
- MCRegister SrcRegX =
- SrcReg == AArch64::WZR
- ? AArch64::XZR
- : TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
- &AArch64::GPR64spRegClass);
- assert(SrcRegX.isValid() && "Source super-reg not valid");
- // This instruction is reading and writing X registers. This may upset
- // the register scavenger and machine verifier, so we need to indicate
- // that we are reading an undefined value from SrcRegX, but a proper
- // value from SrcReg.
- BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
- .addReg(AArch64::XZR)
- .addReg(SrcRegX, RegState::Undef)
- .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
- } else {
- // Otherwise, expand to ORR WZR.
- BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
- .addReg(AArch64::WZR)
- .addReg(SrcReg, getKillRegState(KillSrc));
- }
+ // Otherwise, expand to ORR WZR.
+ BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
+ .addReg(AArch64::WZR)
+ .addReg(SrcReg, getKillRegState(KillSrc));
}
return;
}
@@ -5650,11 +5644,6 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
"Unexpected register store without SVE store instructions");
Opc = AArch64::STR_ZXI;
StackID = TargetStackID::ScalableVector;
- } else if (AArch64::PPRRegClass.hasSubClassEq(RC)) {
- assert(Subtarget.isSVEorStreamingSVEAvailable() &&
- "Unexpected predicate store without SVE store instructions");
- Opc = AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO;
- StackID = TargetStackID::ScalableVector;
}
break;
case 24:
@@ -5835,11 +5824,6 @@ void AArch64InstrInfo::loadRegFromStackSlot(
"Unexpected register load without SVE load instructions");
Opc = AArch64::LDR_ZXI;
StackID = TargetStackID::ScalableVector;
- } else if (AArch64::PPRRegClass.hasSubClassEq(RC)) {
- assert(Subtarget.isSVEorStreamingSVEAvailable() &&
- "Unexpected predicate load without SVE load instructions");
- Opc = AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO;
- StackID = TargetStackID::ScalableVector;
}
break;
case 24:
diff --git a/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp b/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp
index aed137c..1568161 100644
--- a/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp
+++ b/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp
@@ -57,10 +57,7 @@ static bool isPartOfZPRCalleeSaves(MachineBasicBlock::iterator I) {
case AArch64::ST1B_2Z_IMM:
case AArch64::STR_ZXI:
case AArch64::LDR_ZXI:
- case AArch64::CPY_ZPzI_B:
- case AArch64::CMPNE_PPzZI_B:
case AArch64::PTRUE_C_B:
- case AArch64::PTRUE_B:
return I->getFlag(MachineInstr::FrameSetup) ||
I->getFlag(MachineInstr::FrameDestroy);
case AArch64::SEH_SavePReg:
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.td b/llvm/lib/Target/AArch64/AArch64RegisterInfo.td
index 5d89862..ef974df 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.td
@@ -980,19 +980,10 @@ class ZPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
//******************************************************************************
// SVE predicate register classes.
-
-// Note: This hardware mode is enabled in AArch64Subtarget::getHwModeSet()
-// (without the use of the table-gen'd predicates).
-def SMEWithZPRPredicateSpills : HwMode<[Predicate<"false">]>;
-
-def PPRSpillFillRI : RegInfoByHwMode<
- [DefaultMode, SMEWithZPRPredicateSpills],
- [RegInfo<16,16,16>, RegInfo<16,128,128>]>;
-
class PPRClass<int firstreg, int lastreg, int step = 1> : RegisterClass<"AArch64",
[ nxv16i1, nxv8i1, nxv4i1, nxv2i1, nxv1i1 ], 16,
(sequence "P%u", firstreg, lastreg, step)> {
- let RegInfos = PPRSpillFillRI;
+ let Size = 16;
}
def PPR : PPRClass<0, 15> {
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
index 98e0a11..12ddf47 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -86,11 +86,6 @@ static cl::alias AArch64StreamingStackHazardSize(
cl::desc("alias for -aarch64-streaming-hazard-size"),
cl::aliasopt(AArch64StreamingHazardSize));
-static cl::opt<bool> EnableZPRPredicateSpills(
- "aarch64-enable-zpr-predicate-spills", cl::init(false), cl::Hidden,
- cl::desc(
- "Enables spilling/reloading SVE predicates as data vectors (ZPRs)"));
-
static cl::opt<unsigned>
VScaleForTuningOpt("sve-vscale-for-tuning", cl::Hidden,
cl::desc("Force a vscale for tuning factor for SVE"));
@@ -426,20 +421,6 @@ AArch64Subtarget::AArch64Subtarget(const Triple &TT, StringRef CPU,
EnableSubregLiveness = EnableSubregLivenessTracking.getValue();
}
-unsigned AArch64Subtarget::getHwModeSet() const {
- AArch64HwModeBits Modes = AArch64HwModeBits::DefaultMode;
-
- // Use a special hardware mode in streaming[-compatible] functions with
- // aarch64-enable-zpr-predicate-spills. This changes the spill size (and
- // alignment) for the predicate register class.
- if (EnableZPRPredicateSpills.getValue() &&
- (isStreaming() || isStreamingCompatible())) {
- Modes |= AArch64HwModeBits::SMEWithZPRPredicateSpills;
- }
-
- return to_underlying(Modes);
-}
-
const CallLowering *AArch64Subtarget::getCallLowering() const {
return CallLoweringInfo.get();
}
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h
index 671df35..8974965 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -130,8 +130,6 @@ public:
bool IsStreaming = false, bool IsStreamingCompatible = false,
bool HasMinSize = false);
- virtual unsigned getHwModeSet() const override;
-
// Getters for SubtargetFeatures defined in tablegen
#define GET_SUBTARGETINFO_MACRO(ATTRIBUTE, DEFAULT, GETTER) \
bool GETTER() const { return ATTRIBUTE; }
diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td
index be44b8f..33f35ad 100644
--- a/llvm/lib/Target/AArch64/SMEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -58,20 +58,6 @@ def FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO :
let hasSideEffects = 0;
}
-def SPILL_PPR_TO_ZPR_SLOT_PSEUDO :
- Pseudo<(outs), (ins PPRorPNRAny:$Pt, GPR64sp:$Rn, simm9:$imm9), []>, Sched<[]>
-{
- let mayStore = 1;
- let hasSideEffects = 0;
-}
-
-def FILL_PPR_FROM_ZPR_SLOT_PSEUDO :
- Pseudo<(outs PPRorPNRAny:$Pt), (ins GPR64sp:$Rn, simm9:$imm9), []>, Sched<[]>
-{
- let mayLoad = 1;
- let hasSideEffects = 0;
-}
-
def SDTZALoadStore : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisPtrTy<1>, SDTCisInt<2>]>;
// SME ZA loads and stores
def AArch64SMELdr : SDNode<"AArch64ISD::SME_ZA_LDR", SDTZALoadStore,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 9446144..6b3c151 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -1411,20 +1411,6 @@ def FeatureGloballyAddressableScratch : SubtargetFeature<
"FLAT instructions can access scratch memory for any thread in any wave"
>;
-// FIXME: Remove after all users are migrated to attribute.
-def FeatureDynamicVGPR : SubtargetFeature <"dynamic-vgpr",
- "DynamicVGPR",
- "true",
- "Enable dynamic VGPR mode"
->;
-
-// FIXME: Remove after all users are migrated to attribute.
-def FeatureDynamicVGPRBlockSize32 : SubtargetFeature<"dynamic-vgpr-block-size-32",
- "DynamicVGPRBlockSize32",
- "true",
- "Use a block size of 32 for dynamic VGPR allocation (default is 16)"
->;
-
// Enable the use of SCRATCH_STORE/LOAD_BLOCK instructions for saving and
// restoring the callee-saved registers.
def FeatureUseBlockVGPROpsForCSR : SubtargetFeature<"block-vgpr-csr",
@@ -1462,6 +1448,12 @@ def Feature45BitNumRecordsBufferResource : SubtargetFeature< "45-bit-num-records
"The buffer resource (V#) supports 45-bit num_records"
>;
+def FeatureCluster : SubtargetFeature< "cluster",
+ "HasCluster",
+ "true",
+ "Has cluster support"
+>;
+
// Dummy feature used to disable assembler instructions.
def FeatureDisable : SubtargetFeature<"",
"FeatureDisable","true",
@@ -2128,6 +2120,7 @@ def FeatureISAVersion12_50 : FeatureSet<
Feature45BitNumRecordsBufferResource,
FeatureSupportsXNACK,
FeatureXNACK,
+ FeatureCluster,
]>;
def FeatureISAVersion12_51 : FeatureSet<
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index a54d665..879bf5a 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -288,6 +288,8 @@ protected:
bool Has45BitNumRecordsBufferResource = false;
+ bool HasCluster = false;
+
// Dummy feature to use for assembler in tablegen.
bool FeatureDisable = false;
@@ -1837,7 +1839,7 @@ public:
}
/// \returns true if the subtarget supports clusters of workgroups.
- bool hasClusters() const { return GFX1250Insts; }
+ bool hasClusters() const { return HasCluster; }
/// \returns true if the subtarget requires a wait for xcnt before atomic
/// flat/global stores & rmw.
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 20fa141..f7f4d46 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -1353,11 +1353,6 @@ unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
if (DynamicVGPRBlockSize != 0)
return DynamicVGPRBlockSize;
- // Temporarily check the subtarget feature, until we fully switch to using
- // attributes.
- if (STI->getFeatureBits().test(FeatureDynamicVGPR))
- return STI->getFeatureBits().test(FeatureDynamicVGPRBlockSize32) ? 32 : 16;
-
bool IsWave32 = EnableWavefrontSize32
? *EnableWavefrontSize32
: STI->getFeatureBits().test(FeatureWavefrontSize32);
@@ -1412,10 +1407,7 @@ unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI,
if (Features.test(FeatureGFX90AInsts))
return 512;
- // Temporarily check the subtarget feature, until we fully switch to using
- // attributes.
- if (DynamicVGPRBlockSize != 0 ||
- STI->getFeatureBits().test(FeatureDynamicVGPR))
+ if (DynamicVGPRBlockSize != 0)
// On GFX12 we can allocate at most 8 blocks of VGPRs.
return 8 * getVGPRAllocGranule(STI, DynamicVGPRBlockSize);
return getAddressableNumArchVGPRs(STI);
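
(Aside: a quick worked check of the arithmetic that remains after dropping the
feature-flag path; the block size of 32 is only an assumed example value for a
GFX12-style subtarget with dynamic VGPRs enabled:

    // getVGPRAllocGranule(STI, /*DynamicVGPRBlockSize=*/32) returns the block
    // size itself, i.e. 32, and GFX12 can allocate at most 8 such blocks, so
    // getAddressableNumVGPRs(STI, /*DynamicVGPRBlockSize=*/32) == 8 * 32 == 256.
)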
diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
index f9bdc09..77913f2 100644
--- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
@@ -149,6 +149,9 @@ void NVPTXInstPrinter::printCvtMode(const MCInst *MI, int OpNum, raw_ostream &O,
case NVPTX::PTXCvtMode::RNA:
O << ".rna";
return;
+ case NVPTX::PTXCvtMode::RS:
+ O << ".rs";
+ return;
}
}
llvm_unreachable("Invalid conversion modifier");
diff --git a/llvm/lib/Target/NVPTX/NVPTX.h b/llvm/lib/Target/NVPTX/NVPTX.h
index 77a0e03..1e0f747 100644
--- a/llvm/lib/Target/NVPTX/NVPTX.h
+++ b/llvm/lib/Target/NVPTX/NVPTX.h
@@ -207,6 +207,7 @@ enum CvtMode {
RM,
RP,
RNA,
+ RS,
BASE_MASK = 0x0F,
FTZ_FLAG = 0x10,
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 8c21746..bc047a4a 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -1096,9 +1096,10 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
// Enable custom lowering for the following:
// * MVT::i128 - clusterlaunchcontrol
// * MVT::i32 - prmt
+ // * MVT::v4f32 - cvt_rs fp{4/6/8}x4 intrinsics
// * MVT::Other - internal.addrspace.wrap
- setOperationAction(ISD::INTRINSIC_WO_CHAIN, {MVT::i32, MVT::i128, MVT::Other},
- Custom);
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN,
+ {MVT::i32, MVT::i128, MVT::v4f32, MVT::Other}, Custom);
}
const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
@@ -1181,6 +1182,11 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT)
MAKE_CASE(
NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT)
+ MAKE_CASE(NVPTXISD::CVT_E4M3X4_F32X4_RS_SF)
+ MAKE_CASE(NVPTXISD::CVT_E5M2X4_F32X4_RS_SF)
+ MAKE_CASE(NVPTXISD::CVT_E2M3X4_F32X4_RS_SF)
+ MAKE_CASE(NVPTXISD::CVT_E3M2X4_F32X4_RS_SF)
+ MAKE_CASE(NVPTXISD::CVT_E2M1X4_F32X4_RS_SF)
}
return nullptr;
@@ -2903,6 +2909,61 @@ static SDValue LowerClusterLaunchControlQueryCancel(SDValue Op,
{TryCancelResponse0, TryCancelResponse1});
}
+static SDValue lowerCvtRSIntrinsics(SDValue Op, SelectionDAG &DAG) {
+ SDNode *N = Op.getNode();
+ SDLoc DL(N);
+ SDValue F32Vec = N->getOperand(1);
+ SDValue RBits = N->getOperand(2);
+
+ unsigned IntrinsicID = N->getConstantOperandVal(0);
+
+ // Extract the 4 float elements from the vector
+ SmallVector<SDValue, 6> Ops;
+ for (unsigned i = 0; i < 4; ++i)
+ Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, F32Vec,
+ DAG.getIntPtrConstant(i, DL)));
+
+ using NVPTX::PTXCvtMode::CvtMode;
+
+ auto [OpCode, RetTy, CvtModeFlag] =
+ [&]() -> std::tuple<NVPTXISD::NodeType, MVT::SimpleValueType, uint32_t> {
+ switch (IntrinsicID) {
+ case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
+ return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8,
+ CvtMode::RS | CvtMode::RELU_FLAG};
+ case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
+ return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
+ case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
+ return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8,
+ CvtMode::RS | CvtMode::RELU_FLAG};
+ case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
+ return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
+ case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
+ return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8,
+ CvtMode::RS | CvtMode::RELU_FLAG};
+ case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
+ return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
+ case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
+ return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8,
+ CvtMode::RS | CvtMode::RELU_FLAG};
+ case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
+ return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
+ case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
+ return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16,
+ CvtMode::RS | CvtMode::RELU_FLAG};
+ case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
+ return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16, CvtMode::RS};
+ default:
+ llvm_unreachable("unsupported/unhandled intrinsic");
+ }
+ }();
+
+ Ops.push_back(RBits);
+ Ops.push_back(DAG.getConstant(CvtModeFlag, DL, MVT::i32));
+
+ return DAG.getNode(OpCode, DL, RetTy, Ops);
+}
+
static SDValue lowerPrmtIntrinsic(SDValue Op, SelectionDAG &DAG) {
const unsigned Mode = [&]() {
switch (Op->getConstantOperandVal(0)) {
@@ -2972,6 +3033,17 @@ static SDValue lowerIntrinsicWOChain(SDValue Op, SelectionDAG &DAG) {
case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
return LowerClusterLaunchControlQueryCancel(Op, DAG);
+ case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
+ case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
+ case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
+ case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
+ case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
+ case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
+ case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
+ case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
+ case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
+ case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
+ return lowerCvtRSIntrinsics(Op, DAG);
}
}
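
A small standalone sketch, not part of the patch and with hypothetical names, of how the CvtMode flag built by lowerCvtRSIntrinsics lines up with the CvtRS (0xA) and CvtRS_RELU (0x4A) selectors added in NVPTXInstrInfo.td; the lowering forwards the four extracted floats, the random bits, and this flag to the target node.

    #include <cstdint>

    // Flag layout as used by lowerCvtRSIntrinsics and the CvtRS / CvtRS_RELU
    // PatLeafs: the low nibble is the rounding mode (RS == 0xA), 0x40 marks .relu.
    constexpr uint32_t CvtBaseMask = 0x0F;
    constexpr uint32_t CvtRSBase = 0x0A;
    constexpr uint32_t CvtReluFlag = 0x40;

    constexpr uint32_t makeRSMode(bool Relu) {
      return CvtRSBase | (Relu ? CvtReluFlag : 0u);
    }

    static_assert(makeRSMode(false) == 0x0A, "matches the CvtRS PatLeaf");
    static_assert(makeRSMode(true) == 0x4A, "matches the CvtRS_RELU PatLeaf");
    static_assert((makeRSMode(true) & CvtBaseMask) == CvtRSBase,
                  "the base mode survives the relu flag");
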
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
index 769d2fe..63fa0bb 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -79,6 +79,11 @@ enum NodeType : unsigned {
CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_X,
CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Y,
CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Z,
+ CVT_E4M3X4_F32X4_RS_SF,
+ CVT_E5M2X4_F32X4_RS_SF,
+ CVT_E2M3X4_F32X4_RS_SF,
+ CVT_E3M2X4_F32X4_RS_SF,
+ CVT_E2M1X4_F32X4_RS_SF,
FIRST_MEMORY_OPCODE,
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 4cacee2..6c14cf0 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -34,7 +34,8 @@ def CvtRN : PatLeaf<(i32 0x5)>;
def CvtRZ : PatLeaf<(i32 0x6)>;
def CvtRM : PatLeaf<(i32 0x7)>;
def CvtRP : PatLeaf<(i32 0x8)>;
-def CvtRNA : PatLeaf<(i32 0x9)>;
+def CvtRNA : PatLeaf<(i32 0x9)>;
+def CvtRS : PatLeaf<(i32 0xA)>;
def CvtNONE_FTZ : PatLeaf<(i32 0x10)>;
def CvtRNI_FTZ : PatLeaf<(i32 0x11)>;
@@ -50,8 +51,9 @@ def CvtSAT : PatLeaf<(i32 0x20)>;
def CvtSAT_FTZ : PatLeaf<(i32 0x30)>;
def CvtNONE_RELU : PatLeaf<(i32 0x40)>;
-def CvtRN_RELU : PatLeaf<(i32 0x45)>;
-def CvtRZ_RELU : PatLeaf<(i32 0x46)>;
+def CvtRN_RELU : PatLeaf<(i32 0x45)>;
+def CvtRZ_RELU : PatLeaf<(i32 0x46)>;
+def CvtRS_RELU : PatLeaf<(i32 0x4A)>;
def CvtMode : Operand<i32> {
let PrintMethod = "printCvtMode";
@@ -133,6 +135,11 @@ def hasSM100a : Predicate<"Subtarget->getSmVersion() == 100 && Subtarget->hasArc
def hasSM101a : Predicate<"Subtarget->getSmVersion() == 101 && Subtarget->hasArchAccelFeatures()">;
def hasSM120a : Predicate<"Subtarget->getSmVersion() == 120 && Subtarget->hasArchAccelFeatures()">;
+def hasSM100aOrSM103a :
+ Predicate<"(Subtarget->getSmVersion() == 100 || " #
+ "Subtarget->getSmVersion() == 103) " #
+ "&& Subtarget->hasArchAccelFeatures()">;
+
// non-sync shfl instructions are not available on sm_70+ in PTX6.4+
def hasSHFL : Predicate<"!(Subtarget->getSmVersion() >= 70"
"&& Subtarget->getPTXVersion() >= 64)">;
@@ -593,6 +600,23 @@ let hasSideEffects = false in {
defm CVT_f16x2 : CVT_FROM_FLOAT_V2_SM80<"f16x2", B32>;
defm CVT_bf16x2 : CVT_FROM_FLOAT_V2_SM80<"bf16x2", B32>;
+
+ multiclass CVT_FROM_FLOAT_V2_RS<string FromName, RegisterClass RC> {
+ def _f32_rs :
+ BasicFlagsNVPTXInst<(outs RC:$dst),
+ (ins B32:$src1, B32:$src2, B32:$src3),
+ (ins CvtMode:$mode),
+ "cvt${mode:base}${mode:relu}." # FromName # ".f32">;
+
+ def _f32_rs_sf :
+ BasicFlagsNVPTXInst<(outs RC:$dst),
+ (ins B32:$src1, B32:$src2, B32:$src3),
+ (ins CvtMode:$mode),
+ "cvt${mode:base}${mode:relu}.satfinite." # FromName # ".f32">;
+ }
+
+ defm CVT_f16x2 : CVT_FROM_FLOAT_V2_RS<"f16x2", B32>;
+ defm CVT_bf16x2 : CVT_FROM_FLOAT_V2_RS<"bf16x2", B32>;
// FP8 conversions.
multiclass CVT_TO_F8X2<string F8Name> {
@@ -619,6 +643,15 @@ let hasSideEffects = false in {
def CVT_f16x2_e4m3x2 : CVT_f16x2_fp8<"e4m3">;
def CVT_f16x2_e5m2x2 : CVT_f16x2_fp8<"e5m2">;
+
+ class CVT_TO_FP8X4<string F8Name> :
+ NVPTXInst<(outs B32:$dst),
+ (ins B32:$src1, B32:$src2, B32:$src3, B32:$src4, B32:$src5, CvtMode:$mode),
+ "cvt${mode:base}${mode:relu}.satfinite." # F8Name #
+ "x4.f32 \t$dst, {{$src1, $src2, $src3, $src4}}, $src5;">;
+
+ def CVT_e4m3x4_f32x4_rs_sf : CVT_TO_FP8X4<"e4m3">;
+ def CVT_e5m2x4_f32x4_rs_sf : CVT_TO_FP8X4<"e5m2">;
// Float to TF32 conversions
multiclass CVT_TO_TF32<string Modifier, list<Predicate> Preds = [hasPTX<78>, hasSM<90>]> {
@@ -652,6 +685,15 @@ let hasSideEffects = false in {
"cvt${mode:base}${mode:relu}.f16x2." # type>;
}
+ class CVT_TO_FP6X4<string F6Name> :
+ NVPTXInst<(outs B32:$dst),
+ (ins B32:$src1, B32:$src2, B32:$src3, B32:$src4, B32:$src5, CvtMode:$mode),
+ "cvt${mode:base}${mode:relu}.satfinite." # F6Name #
+ "x4.f32 \t$dst, {{$src1, $src2, $src3, $src4}}, $src5;">;
+
+ def CVT_e2m3x4_f32x4_rs_sf : CVT_TO_FP6X4<"e2m3">;
+ def CVT_e3m2x4_f32x4_rs_sf : CVT_TO_FP6X4<"e3m2">;
+
// FP4 conversions.
def CVT_e2m1x2_f32_sf : NVPTXInst<(outs B16:$dst),
(ins B32:$src1, B32:$src2, CvtMode:$mode),
@@ -668,6 +710,12 @@ let hasSideEffects = false in {
"cvt.u8.u16 \t%e2m1x2_in, $src; \n\t",
"cvt${mode:base}${mode:relu}.f16x2.e2m1x2 \t$dst, %e2m1x2_in; \n\t",
"}}"), []>;
+
+ def CVT_e2m1x4_f32x4_rs_sf :
+ NVPTXInst<(outs B16:$dst),
+ (ins B32:$src1, B32:$src2, B32:$src3, B32:$src4, B32:$src5, CvtMode:$mode),
+ "cvt${mode:base}${mode:relu}.satfinite.e2m1x4.f32 \t" #
+ "$dst, {{$src1, $src2, $src3, $src4}}, $src5;">;
// UE8M0x2 conversions.
class CVT_f32_to_ue8m0x2<string sat = ""> :
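
To make the asm templates above concrete, a standalone sketch (hypothetical helper, not code from this patch) of how ${mode:base}${mode:relu} plus the fixed ".satfinite" expands for the new RS instructions, with the expected PTX for the e2m1x4 case shown in a comment.

    #include <string>

    // Assembles the conversion suffix the same way the printer expands
    // ${mode:base}${mode:relu} followed by the fixed ".satfinite" above.
    static std::string cvtRSSuffix(bool Relu) {
      std::string S = ".rs";
      if (Relu)
        S += ".relu";
      return S + ".satfinite";
    }

    // cvtRSSuffix(true) == ".rs.relu.satfinite", so CVT_e2m1x4_f32x4_rs_sf
    // should print roughly as:
    //   cvt.rs.relu.satfinite.e2m1x4.f32  %dst, {%f1, %f2, %f3, %f4}, %rbits;
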
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index e91171c..a8b854f 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -1782,11 +1782,32 @@ def : Pat<(int_nvvm_ff2bf16x2_rn_relu f32:$a, f32:$b), (CVT_bf16x2_f32 $a, $b, C
def : Pat<(int_nvvm_ff2bf16x2_rz f32:$a, f32:$b), (CVT_bf16x2_f32 $a, $b, CvtRZ)>;
def : Pat<(int_nvvm_ff2bf16x2_rz_relu f32:$a, f32:$b), (CVT_bf16x2_f32 $a, $b, CvtRZ_RELU)>;
+let Predicates = [hasPTX<87>, hasSM100aOrSM103a] in {
+def : Pat<(int_nvvm_ff2bf16x2_rs f32:$a, f32:$b, i32:$c),
+ (CVT_bf16x2_f32_rs $a, $b, $c, CvtRS)>;
+def : Pat<(int_nvvm_ff2bf16x2_rs_relu f32:$a, f32:$b, i32:$c),
+ (CVT_bf16x2_f32_rs $a, $b, $c, CvtRS_RELU)>;
+def : Pat<(int_nvvm_ff2bf16x2_rs_satfinite f32:$a, f32:$b, i32:$c),
+ (CVT_bf16x2_f32_rs_sf $a, $b, $c, CvtRS)>;
+def : Pat<(int_nvvm_ff2bf16x2_rs_relu_satfinite f32:$a, f32:$b, i32:$c),
+ (CVT_bf16x2_f32_rs_sf $a, $b, $c, CvtRS_RELU)>;
+}
+
def : Pat<(int_nvvm_ff2f16x2_rn f32:$a, f32:$b), (CVT_f16x2_f32 $a, $b, CvtRN)>;
def : Pat<(int_nvvm_ff2f16x2_rn_relu f32:$a, f32:$b), (CVT_f16x2_f32 $a, $b, CvtRN_RELU)>;
def : Pat<(int_nvvm_ff2f16x2_rz f32:$a, f32:$b), (CVT_f16x2_f32 $a, $b, CvtRZ)>;
def : Pat<(int_nvvm_ff2f16x2_rz_relu f32:$a, f32:$b), (CVT_f16x2_f32 $a, $b, CvtRZ_RELU)>;
+let Predicates = [hasPTX<87>, hasSM100aOrSM103a] in {
+def : Pat<(int_nvvm_ff2f16x2_rs f32:$a, f32:$b, i32:$c),
+ (CVT_f16x2_f32_rs $a, $b, $c, CvtRS)>;
+def : Pat<(int_nvvm_ff2f16x2_rs_relu f32:$a, f32:$b, i32:$c),
+ (CVT_f16x2_f32_rs $a, $b, $c, CvtRS_RELU)>;
+def : Pat<(int_nvvm_ff2f16x2_rs_satfinite f32:$a, f32:$b, i32:$c),
+ (CVT_f16x2_f32_rs_sf $a, $b, $c, CvtRS)>;
+def : Pat<(int_nvvm_ff2f16x2_rs_relu_satfinite f32:$a, f32:$b, i32:$c),
+ (CVT_f16x2_f32_rs_sf $a, $b, $c, CvtRS_RELU)>;
+}
def : Pat<(int_nvvm_f2bf16_rn f32:$a), (CVT_bf16_f32 $a, CvtRN)>;
def : Pat<(int_nvvm_f2bf16_rn_relu f32:$a), (CVT_bf16_f32 $a, CvtRN_RELU)>;
def : Pat<(int_nvvm_f2bf16_rz f32:$a), (CVT_bf16_f32 $a, CvtRZ)>;
@@ -1929,6 +1950,52 @@ let Predicates = [hasPTX<86>, hasSM<100>, hasArchAccelFeatures] in {
(CVT_bf16x2_ue8m0x2 $a)>;
}
+def SDT_CVT_F32X4_TO_FPX4_RS_VEC :
+ SDTypeProfile<1, 6, [SDTCisVec<0>, SDTCisFP<1>, SDTCisFP<2>, SDTCisFP<3>,
+ SDTCisFP<4>, SDTCisInt<5>, SDTCisInt<6>]>;
+
+def SDT_CVT_F32X4_TO_FPX4_RS_INT :
+ SDTypeProfile<1, 6, [SDTCisInt<0>, SDTCisFP<1>, SDTCisFP<2>, SDTCisFP<3>,
+ SDTCisFP<4>, SDTCisInt<5>, SDTCisInt<6>]>;
+
+class CVT_F32X4_TO_FPX4_RS_SF_NODE<string FPName, SDTypeProfile SDT> :
+ SDNode<"NVPTXISD::CVT_" # FPName # "X4_F32X4_RS_SF", SDT, []>;
+
+multiclass CVT_F32X4_TO_FPX4_RS_SF_VEC<string FPName, VTVec RetTy> {
+ def : Pat<(RetTy (CVT_F32X4_TO_FPX4_RS_SF_NODE<!toupper(FPName),
+ SDT_CVT_F32X4_TO_FPX4_RS_VEC>
+ f32:$f1, f32:$f2, f32:$f3, f32:$f4, i32:$rbits, CvtRS)),
+ (!cast<NVPTXInst>("CVT_" # FPName # "x4_f32x4_rs_sf")
+ $f1, $f2, $f3, $f4, $rbits, CvtRS)>;
+
+ def : Pat<(RetTy (CVT_F32X4_TO_FPX4_RS_SF_NODE<!toupper(FPName),
+ SDT_CVT_F32X4_TO_FPX4_RS_VEC>
+ f32:$f1, f32:$f2, f32:$f3, f32:$f4, i32:$rbits, CvtRS_RELU)),
+ (!cast<NVPTXInst>("CVT_" # FPName # "x4_f32x4_rs_sf")
+ $f1, $f2, $f3, $f4, $rbits, CvtRS_RELU)>;
+}
+
+// RS rounding mode conversions
+let Predicates = [hasPTX<87>, hasSM100aOrSM103a] in {
+// FP8x4 conversions
+defm : CVT_F32X4_TO_FPX4_RS_SF_VEC<"e4m3", v4i8>;
+defm : CVT_F32X4_TO_FPX4_RS_SF_VEC<"e5m2", v4i8>;
+
+// FP6x4 conversions
+defm : CVT_F32X4_TO_FPX4_RS_SF_VEC<"e2m3", v4i8>;
+defm : CVT_F32X4_TO_FPX4_RS_SF_VEC<"e3m2", v4i8>;
+
+// FP4x4 conversions
+def : Pat<(i16 (CVT_F32X4_TO_FPX4_RS_SF_NODE<"E2M1",
+ SDT_CVT_F32X4_TO_FPX4_RS_INT>
+ f32:$f1, f32:$f2, f32:$f3, f32:$f4, i32:$rbits, CvtRS)),
+ (CVT_e2m1x4_f32x4_rs_sf $f1, $f2, $f3, $f4, $rbits, CvtRS)>;
+def : Pat<(i16 (CVT_F32X4_TO_FPX4_RS_SF_NODE<"E2M1",
+ SDT_CVT_F32X4_TO_FPX4_RS_INT>
+ f32:$f1, f32:$f2, f32:$f3, f32:$f4, i32:$rbits, CvtRS_RELU)),
+ (CVT_e2m1x4_f32x4_rs_sf $f1, $f2, $f3, $f4, $rbits, CvtRS_RELU)>;
+}
+
//
// FNS
//
@@ -4461,6 +4528,10 @@ class WMMA_REGINFO<WMMA_REGS r, string op, string metadata = "", string kind = "
!eq(ptx_elt_type, "e2m1"),
!ne(kind, "")) : [hasSM120a, hasPTX<87>],
+ !and(!or(!eq(ptx_elt_type,"e4m3"),
+ !eq(ptx_elt_type,"e5m2")),
+ !eq(geom, "m16n8k16")) : [hasSM<89>, hasPTX<87>],
+
!or(!eq(ptx_elt_type, "e4m3"),
!eq(ptx_elt_type, "e5m2")) : [hasSM<89>, hasPTX<84>],
@@ -4476,6 +4547,11 @@ class WMMA_REGINFO<WMMA_REGS r, string op, string metadata = "", string kind = "
!and(!eq(geom, "m8n8k4"),
!eq(ptx_elt_type, "f64")) : [hasSM<80>, hasPTX<70>],
+ !and(!or(!eq(geom, "m16n8k4"),
+ !eq(geom, "m16n8k8"),
+ !eq(geom, "m16n8k16")),
+ !eq(ptx_elt_type, "f64")) : [hasSM<90>, hasPTX<78>],
+
// fp16 -> fp16/fp32 @ m8n32k16/m32n8k16
!and(!or(!eq(geom, "m8n32k16"),
!eq(geom, "m32n8k16")),
@@ -4760,8 +4836,8 @@ defset list<WMMA_INSTR> WMMAs = {
// MMA
class MMA<WMMA_REGINFO FragA, WMMA_REGINFO FragB,
WMMA_REGINFO FragC, WMMA_REGINFO FragD,
- string ALayout, string BLayout, int Satfinite, string b1op>
- : WMMA_INSTR<MMA_NAME<ALayout, BLayout, Satfinite, b1op, FragA, FragB, FragC, FragD>.record,
+ string ALayout, string BLayout, int Satfinite, string b1op, string Kind>
+ : WMMA_INSTR<MMA_NAME<ALayout, BLayout, Satfinite, b1op, Kind, FragA, FragB, FragC, FragD>.record,
[FragA.Ins, FragB.Ins, FragC.Ins]>,
// Requires does not seem to have effect on Instruction w/o Patterns.
// We set it here anyways and propagate to the Pat<> we construct below.
@@ -4776,6 +4852,7 @@ class MMA<WMMA_REGINFO FragA, WMMA_REGINFO FragB,
# FragA.geom
# "." # ALayout
# "." # BLayout
+ # !if(!ne(Kind, ""), "." # Kind, "")
# !if(Satfinite, ".satfinite", "")
# TypeList
# b1op # "\n\t\t"
@@ -4792,13 +4869,15 @@ defset list<WMMA_INSTR> MMAs = {
foreach satf = [0, 1] in {
foreach op = NVVM_MMA_OPS.all_mma_ops in {
foreach b1op = NVVM_MMA_B1OPS<op>.ret in {
- if NVVM_MMA_SUPPORTED<op, layout_a, layout_b, satf>.ret then {
- def : MMA<WMMA_REGINFO<op[0], "mma">,
- WMMA_REGINFO<op[1], "mma">,
- WMMA_REGINFO<op[2], "mma">,
- WMMA_REGINFO<op[3], "mma">,
- layout_a, layout_b, satf, b1op>;
- }
+ foreach kind = ["", "kind::f8f6f4"] in {
+ if NVVM_MMA_SUPPORTED<op, layout_a, layout_b, kind, satf>.ret then {
+ def : MMA<WMMA_REGINFO<op[0], "mma", "", kind>,
+ WMMA_REGINFO<op[1], "mma", "", kind>,
+ WMMA_REGINFO<op[2], "mma", "", kind>,
+ WMMA_REGINFO<op[3], "mma", "", kind>,
+ layout_a, layout_b, satf, b1op, kind>;
+ }
+ } // kind
} // b1op
} // op
} // satf
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 0afec42..989950f 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -307,6 +307,10 @@ private:
bool selectHandleFromBinding(Register &ResVReg, const SPIRVType *ResType,
MachineInstr &I) const;
+ bool selectCounterHandleFromBinding(Register &ResVReg,
+ const SPIRVType *ResType,
+ MachineInstr &I) const;
+
bool selectReadImageIntrinsic(Register &ResVReg, const SPIRVType *ResType,
MachineInstr &I) const;
bool selectImageWriteIntrinsic(MachineInstr &I) const;
@@ -314,6 +318,8 @@ private:
MachineInstr &I) const;
bool selectModf(Register ResVReg, const SPIRVType *ResType,
MachineInstr &I) const;
+ bool selectUpdateCounter(Register &ResVReg, const SPIRVType *ResType,
+ MachineInstr &I) const;
bool selectFrexp(Register ResVReg, const SPIRVType *ResType,
MachineInstr &I) const;
// Utilities
@@ -3443,6 +3449,10 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
case Intrinsic::spv_resource_handlefrombinding: {
return selectHandleFromBinding(ResVReg, ResType, I);
}
+ case Intrinsic::spv_resource_counterhandlefrombinding:
+ return selectCounterHandleFromBinding(ResVReg, ResType, I);
+ case Intrinsic::spv_resource_updatecounter:
+ return selectUpdateCounter(ResVReg, ResType, I);
case Intrinsic::spv_resource_store_typedbuffer: {
return selectImageWriteIntrinsic(I);
}
@@ -3478,6 +3488,130 @@ bool SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg,
*cast<GIntrinsic>(&I), I);
}
+bool SPIRVInstructionSelector::selectCounterHandleFromBinding(
+ Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
+ auto &Intr = cast<GIntrinsic>(I);
+ assert(Intr.getIntrinsicID() ==
+ Intrinsic::spv_resource_counterhandlefrombinding);
+
+ // Extract information from the intrinsic call.
+ Register MainHandleReg = Intr.getOperand(2).getReg();
+ auto *MainHandleDef = cast<GIntrinsic>(getVRegDef(*MRI, MainHandleReg));
+ assert(MainHandleDef->getIntrinsicID() ==
+ Intrinsic::spv_resource_handlefrombinding);
+
+ uint32_t Set = getIConstVal(Intr.getOperand(4).getReg(), MRI);
+ uint32_t Binding = getIConstVal(Intr.getOperand(3).getReg(), MRI);
+ uint32_t ArraySize = getIConstVal(MainHandleDef->getOperand(4).getReg(), MRI);
+ Register IndexReg = MainHandleDef->getOperand(5).getReg();
+ const bool IsNonUniform = false;
+ std::string CounterName =
+ getStringValueFromReg(MainHandleDef->getOperand(6).getReg(), *MRI) +
+ ".counter";
+
+ // Create the counter variable.
+ MachineIRBuilder MIRBuilder(I);
+ Register CounterVarReg = buildPointerToResource(
+ GR.getPointeeType(ResType), GR.getPointerStorageClass(ResType), Set,
+ Binding, ArraySize, IndexReg, IsNonUniform, CounterName, MIRBuilder);
+
+ return BuildCOPY(ResVReg, CounterVarReg, I);
+}
+
+bool SPIRVInstructionSelector::selectUpdateCounter(Register &ResVReg,
+ const SPIRVType *ResType,
+ MachineInstr &I) const {
+ auto &Intr = cast<GIntrinsic>(I);
+ assert(Intr.getIntrinsicID() == Intrinsic::spv_resource_updatecounter);
+
+ Register CounterHandleReg = Intr.getOperand(2).getReg();
+ Register IncrReg = Intr.getOperand(3).getReg();
+
+ // The counter handle is a pointer to the counter variable (which is a struct
+ // containing an i32). We need to get a pointer to that i32 member to do the
+ // atomic operation.
+#ifndef NDEBUG
+ SPIRVType *CounterVarType = GR.getSPIRVTypeForVReg(CounterHandleReg);
+ SPIRVType *CounterVarPointeeType = GR.getPointeeType(CounterVarType);
+ assert(CounterVarPointeeType &&
+ CounterVarPointeeType->getOpcode() == SPIRV::OpTypeStruct &&
+ "Counter variable must be a struct");
+ assert(GR.getPointerStorageClass(CounterVarType) ==
+ SPIRV::StorageClass::StorageBuffer &&
+ "Counter variable must be in the storage buffer storage class");
+ assert(CounterVarPointeeType->getNumOperands() == 2 &&
+ "Counter variable must have exactly 1 member in the struct");
+ const SPIRVType *MemberType =
+ GR.getSPIRVTypeForVReg(CounterVarPointeeType->getOperand(1).getReg());
+ assert(MemberType->getOpcode() == SPIRV::OpTypeInt &&
+ "Counter variable struct must have a single i32 member");
+#endif
+
+ // The struct has a single i32 member.
+ MachineIRBuilder MIRBuilder(I);
+ const Type *LLVMIntType =
+ Type::getInt32Ty(I.getMF()->getFunction().getContext());
+
+ SPIRVType *IntPtrType = GR.getOrCreateSPIRVPointerType(
+ LLVMIntType, MIRBuilder, SPIRV::StorageClass::StorageBuffer);
+
+ auto Zero = buildI32Constant(0, I);
+ if (!Zero.second)
+ return false;
+
+ Register PtrToCounter =
+ MRI->createVirtualRegister(GR.getRegClass(IntPtrType));
+ if (!BuildMI(*I.getParent(), I, I.getDebugLoc(),
+ TII.get(SPIRV::OpAccessChain))
+ .addDef(PtrToCounter)
+ .addUse(GR.getSPIRVTypeID(IntPtrType))
+ .addUse(CounterHandleReg)
+ .addUse(Zero.first)
+ .constrainAllUses(TII, TRI, RBI)) {
+ return false;
+ }
+
+ // For UAV/SSBO counters, the scope is Device. The counter variable is not
+ // used as a flag. So the memory semantics can be None.
+ auto Scope = buildI32Constant(SPIRV::Scope::Device, I);
+ if (!Scope.second)
+ return false;
+ auto Semantics = buildI32Constant(SPIRV::MemorySemantics::None, I);
+ if (!Semantics.second)
+ return false;
+
+ int64_t IncrVal = getIConstValSext(IncrReg, MRI);
+ auto Incr = buildI32Constant(static_cast<uint32_t>(IncrVal), I);
+ if (!Incr.second)
+ return false;
+
+ Register AtomicRes = MRI->createVirtualRegister(GR.getRegClass(ResType));
+ if (!BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpAtomicIAdd))
+ .addDef(AtomicRes)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(PtrToCounter)
+ .addUse(Scope.first)
+ .addUse(Semantics.first)
+ .addUse(Incr.first)
+ .constrainAllUses(TII, TRI, RBI)) {
+ return false;
+ }
+ if (IncrVal >= 0) {
+ return BuildCOPY(ResVReg, AtomicRes, I);
+ }
+
+ // In HLSL, IncrementCounter returns the value *before* the increment, while
+ // DecrementCounter returns the value *after* the decrement. Both are lowered
+ // to the same atomic intrinsic which returns the value *before* the
+ // operation. So for decrements (negative IncrVal), we must subtract the
+ // increment value from the result to get the post-decrement value.
+ return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
+ .addDef(ResVReg)
+ .addUse(GR.getSPIRVTypeID(ResType))
+ .addUse(AtomicRes)
+ .addUse(Incr.first)
+ .constrainAllUses(TII, TRI, RBI);
+}
bool SPIRVInstructionSelector::selectReadImageIntrinsic(
Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
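
A minimal standalone model, assuming only what the comment in selectUpdateCounter states (hypothetical helper, not the selector itself), of the return-value adjustment: the atomic add returns the pre-operation value, which is what IncrementCounter wants, and adding the negative increment back in yields the post-decrement value DecrementCounter wants.

    #include <cassert>
    #include <cstdint>

    // Models what selectUpdateCounter emits: an atomic fetch-add that returns
    // the old value, plus an extra add for decrements so the post-decrement
    // value is returned, matching HLSL's Increment/DecrementCounter semantics.
    static int32_t updateCounter(int32_t &Counter, int32_t IncrVal) {
      int32_t Old = Counter;   // value returned by OpAtomicIAdd
      Counter += IncrVal;
      if (IncrVal >= 0)
        return Old;            // IncrementCounter: pre-increment value
      return Old + IncrVal;    // DecrementCounter: post-decrement value
    }

    int main() {
      int32_t C = 5;
      assert(updateCounter(C, 1) == 5 && C == 6);  // increment returns old value
      assert(updateCounter(C, -1) == 5 && C == 5); // decrement returns new value
    }
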
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp
index 205895e..fc14a03 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp
@@ -39,6 +39,10 @@ private:
void collectBindingInfo(Module &M);
uint32_t getAndReserveFirstUnusedBinding(uint32_t DescSet);
void replaceImplicitBindingCalls(Module &M);
+ void replaceResourceHandleCall(Module &M, CallInst *OldCI,
+ uint32_t NewBinding);
+ void replaceCounterHandleCall(Module &M, CallInst *OldCI,
+ uint32_t NewBinding);
void verifyUniqueOrderIdPerResource(SmallVectorImpl<CallInst *> &Calls);
// A map from descriptor set to a bit vector of used binding numbers.
@@ -56,64 +60,93 @@ struct BindingInfoCollector : public InstVisitor<BindingInfoCollector> {
: UsedBindings(UsedBindings), ImplicitBindingCalls(ImplicitBindingCalls) {
}
+ void addBinding(uint32_t DescSet, uint32_t Binding) {
+ if (UsedBindings.size() <= DescSet) {
+ UsedBindings.resize(DescSet + 1);
+ UsedBindings[DescSet].resize(64);
+ }
+ if (UsedBindings[DescSet].size() <= Binding) {
+ UsedBindings[DescSet].resize(2 * Binding + 1);
+ }
+ UsedBindings[DescSet].set(Binding);
+ }
+
void visitCallInst(CallInst &CI) {
if (CI.getIntrinsicID() == Intrinsic::spv_resource_handlefrombinding) {
const uint32_t DescSet =
cast<ConstantInt>(CI.getArgOperand(0))->getZExtValue();
const uint32_t Binding =
cast<ConstantInt>(CI.getArgOperand(1))->getZExtValue();
-
- if (UsedBindings.size() <= DescSet) {
- UsedBindings.resize(DescSet + 1);
- UsedBindings[DescSet].resize(64);
- }
- if (UsedBindings[DescSet].size() <= Binding) {
- UsedBindings[DescSet].resize(2 * Binding + 1);
- }
- UsedBindings[DescSet].set(Binding);
+ addBinding(DescSet, Binding);
} else if (CI.getIntrinsicID() ==
Intrinsic::spv_resource_handlefromimplicitbinding) {
ImplicitBindingCalls.push_back(&CI);
+ } else if (CI.getIntrinsicID() ==
+ Intrinsic::spv_resource_counterhandlefrombinding) {
+ const uint32_t DescSet =
+ cast<ConstantInt>(CI.getArgOperand(2))->getZExtValue();
+ const uint32_t Binding =
+ cast<ConstantInt>(CI.getArgOperand(1))->getZExtValue();
+ addBinding(DescSet, Binding);
+ } else if (CI.getIntrinsicID() ==
+ Intrinsic::spv_resource_counterhandlefromimplicitbinding) {
+ ImplicitBindingCalls.push_back(&CI);
}
}
};
+static uint32_t getOrderId(const CallInst *CI) {
+ uint32_t OrderIdArgIdx = 0;
+ switch (CI->getIntrinsicID()) {
+ case Intrinsic::spv_resource_handlefromimplicitbinding:
+ OrderIdArgIdx = 0;
+ break;
+ case Intrinsic::spv_resource_counterhandlefromimplicitbinding:
+ OrderIdArgIdx = 1;
+ break;
+ default:
+ llvm_unreachable("CallInst is not an implicit binding intrinsic");
+ }
+ return cast<ConstantInt>(CI->getArgOperand(OrderIdArgIdx))->getZExtValue();
+}
+
+static uint32_t getDescSet(const CallInst *CI) {
+ uint32_t DescSetArgIdx;
+ switch (CI->getIntrinsicID()) {
+ case Intrinsic::spv_resource_handlefromimplicitbinding:
+ case Intrinsic::spv_resource_handlefrombinding:
+ DescSetArgIdx = 1;
+ break;
+ case Intrinsic::spv_resource_counterhandlefromimplicitbinding:
+ case Intrinsic::spv_resource_counterhandlefrombinding:
+ DescSetArgIdx = 2;
+ break;
+ default:
+ llvm_unreachable("CallInst is not an implicit binding intrinsic");
+ }
+ return cast<ConstantInt>(CI->getArgOperand(DescSetArgIdx))->getZExtValue();
+}
+
void SPIRVLegalizeImplicitBinding::collectBindingInfo(Module &M) {
BindingInfoCollector InfoCollector(UsedBindings, ImplicitBindingCalls);
InfoCollector.visit(M);
// Sort the collected calls by their order ID.
- std::sort(
- ImplicitBindingCalls.begin(), ImplicitBindingCalls.end(),
- [](const CallInst *A, const CallInst *B) {
- const uint32_t OrderIdArgIdx = 0;
- const uint32_t OrderA =
- cast<ConstantInt>(A->getArgOperand(OrderIdArgIdx))->getZExtValue();
- const uint32_t OrderB =
- cast<ConstantInt>(B->getArgOperand(OrderIdArgIdx))->getZExtValue();
- return OrderA < OrderB;
- });
+ std::sort(ImplicitBindingCalls.begin(), ImplicitBindingCalls.end(),
+ [](const CallInst *A, const CallInst *B) {
+ return getOrderId(A) < getOrderId(B);
+ });
}
void SPIRVLegalizeImplicitBinding::verifyUniqueOrderIdPerResource(
SmallVectorImpl<CallInst *> &Calls) {
// Check that the order Id is unique per resource.
for (uint32_t i = 1; i < Calls.size(); ++i) {
- const uint32_t OrderIdArgIdx = 0;
- const uint32_t DescSetArgIdx = 1;
- const uint32_t OrderA =
- cast<ConstantInt>(Calls[i - 1]->getArgOperand(OrderIdArgIdx))
- ->getZExtValue();
- const uint32_t OrderB =
- cast<ConstantInt>(Calls[i]->getArgOperand(OrderIdArgIdx))
- ->getZExtValue();
+ const uint32_t OrderA = getOrderId(Calls[i - 1]);
+ const uint32_t OrderB = getOrderId(Calls[i]);
if (OrderA == OrderB) {
- const uint32_t DescSetA =
- cast<ConstantInt>(Calls[i - 1]->getArgOperand(DescSetArgIdx))
- ->getZExtValue();
- const uint32_t DescSetB =
- cast<ConstantInt>(Calls[i]->getArgOperand(DescSetArgIdx))
- ->getZExtValue();
+ const uint32_t DescSetA = getDescSet(Calls[i - 1]);
+ const uint32_t DescSetB = getDescSet(Calls[i]);
if (DescSetA != DescSetB) {
report_fatal_error("Implicit binding calls with the same order ID must "
"have the same descriptor set");
@@ -144,36 +177,26 @@ void SPIRVLegalizeImplicitBinding::replaceImplicitBindingCalls(Module &M) {
uint32_t lastBindingNumber = -1;
for (CallInst *OldCI : ImplicitBindingCalls) {
- IRBuilder<> Builder(OldCI);
- const uint32_t OrderId =
- cast<ConstantInt>(OldCI->getArgOperand(0))->getZExtValue();
- const uint32_t DescSet =
- cast<ConstantInt>(OldCI->getArgOperand(1))->getZExtValue();
-
- // Reuse an existing binding for this order ID, if one was already assigned.
- // Otherwise, assign a new binding.
- const uint32_t NewBinding = (lastOrderId == OrderId)
- ? lastBindingNumber
- : getAndReserveFirstUnusedBinding(DescSet);
- lastOrderId = OrderId;
- lastBindingNumber = NewBinding;
-
- SmallVector<Value *, 8> Args;
- Args.push_back(Builder.getInt32(DescSet));
- Args.push_back(Builder.getInt32(NewBinding));
-
- // Copy the remaining arguments from the old call.
- for (uint32_t i = 2; i < OldCI->arg_size(); ++i) {
- Args.push_back(OldCI->getArgOperand(i));
+ const uint32_t OrderId = getOrderId(OldCI);
+ uint32_t BindingNumber;
+ if (OrderId == lastOrderId) {
+ BindingNumber = lastBindingNumber;
+ } else {
+ const uint32_t DescSet = getDescSet(OldCI);
+ BindingNumber = getAndReserveFirstUnusedBinding(DescSet);
}
- Function *NewFunc = Intrinsic::getOrInsertDeclaration(
- &M, Intrinsic::spv_resource_handlefrombinding, OldCI->getType());
- CallInst *NewCI = Builder.CreateCall(NewFunc, Args);
- NewCI->setCallingConv(OldCI->getCallingConv());
-
- OldCI->replaceAllUsesWith(NewCI);
- OldCI->eraseFromParent();
+ if (OldCI->getIntrinsicID() ==
+ Intrinsic::spv_resource_handlefromimplicitbinding) {
+ replaceResourceHandleCall(M, OldCI, BindingNumber);
+ } else {
+ assert(OldCI->getIntrinsicID() ==
+ Intrinsic::spv_resource_counterhandlefromimplicitbinding &&
+ "Unexpected implicit binding intrinsic");
+ replaceCounterHandleCall(M, OldCI, BindingNumber);
+ }
+ lastOrderId = OrderId;
+ lastBindingNumber = BindingNumber;
}
}
@@ -196,4 +219,49 @@ INITIALIZE_PASS(SPIRVLegalizeImplicitBinding, "legalize-spirv-implicit-binding",
ModulePass *llvm::createSPIRVLegalizeImplicitBindingPass() {
return new SPIRVLegalizeImplicitBinding();
-}
\ No newline at end of file
+}
+
+void SPIRVLegalizeImplicitBinding::replaceResourceHandleCall(
+ Module &M, CallInst *OldCI, uint32_t NewBinding) {
+ IRBuilder<> Builder(OldCI);
+ const uint32_t DescSet =
+ cast<ConstantInt>(OldCI->getArgOperand(1))->getZExtValue();
+
+ SmallVector<Value *, 8> Args;
+ Args.push_back(Builder.getInt32(DescSet));
+ Args.push_back(Builder.getInt32(NewBinding));
+
+ // Copy the remaining arguments from the old call.
+ for (uint32_t i = 2; i < OldCI->arg_size(); ++i) {
+ Args.push_back(OldCI->getArgOperand(i));
+ }
+
+ Function *NewFunc = Intrinsic::getOrInsertDeclaration(
+ &M, Intrinsic::spv_resource_handlefrombinding, OldCI->getType());
+ CallInst *NewCI = Builder.CreateCall(NewFunc, Args);
+ NewCI->setCallingConv(OldCI->getCallingConv());
+
+ OldCI->replaceAllUsesWith(NewCI);
+ OldCI->eraseFromParent();
+}
+
+void SPIRVLegalizeImplicitBinding::replaceCounterHandleCall(
+ Module &M, CallInst *OldCI, uint32_t NewBinding) {
+ IRBuilder<> Builder(OldCI);
+ const uint32_t DescSet =
+ cast<ConstantInt>(OldCI->getArgOperand(2))->getZExtValue();
+
+ SmallVector<Value *, 8> Args;
+ Args.push_back(OldCI->getArgOperand(0));
+ Args.push_back(Builder.getInt32(NewBinding));
+ Args.push_back(Builder.getInt32(DescSet));
+
+ Type *Tys[] = {OldCI->getType(), OldCI->getArgOperand(0)->getType()};
+ Function *NewFunc = Intrinsic::getOrInsertDeclaration(
+ &M, Intrinsic::spv_resource_counterhandlefrombinding, Tys);
+ CallInst *NewCI = Builder.CreateCall(NewFunc, Args);
+ NewCI->setCallingConv(OldCI->getCallingConv());
+
+ OldCI->replaceAllUsesWith(NewCI);
+ OldCI->eraseFromParent();
+}
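
For readability, a small sketch (hypothetical names, mirroring getOrderId/getDescSet and the two helpers above, not code from this patch) of the operand positions the pass relies on for the implicit-binding intrinsics.

    // Operand indices as read from the helpers above:
    //   handlefromimplicitbinding:        (order id, desc set, ...)
    //   counterhandlefromimplicitbinding: (main handle, order id, desc set, ...)
    struct BindingArgLayout {
      unsigned OrderIdIdx;
      unsigned DescSetIdx;
    };

    constexpr BindingArgLayout HandleLayout{0, 1};
    constexpr BindingArgLayout CounterHandleLayout{1, 2};
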
diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
index 327c011..1d47c89 100644
--- a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
@@ -385,6 +385,12 @@ uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI) {
return MI->getOperand(1).getCImm()->getValue().getZExtValue();
}
+int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI) {
+ const MachineInstr *MI = getDefInstrMaybeConstant(ConstReg, MRI);
+ assert(MI && MI->getOpcode() == TargetOpcode::G_CONSTANT);
+ return MI->getOperand(1).getCImm()->getSExtValue();
+}
+
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID) {
if (const auto *GI = dyn_cast<GIntrinsic>(&MI))
return GI->is(IntrinsicID);
diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.h b/llvm/lib/Target/SPIRV/SPIRVUtils.h
index 409a0fd..5777a24 100644
--- a/llvm/lib/Target/SPIRV/SPIRVUtils.h
+++ b/llvm/lib/Target/SPIRV/SPIRVUtils.h
@@ -289,6 +289,9 @@ MachineInstr *getDefInstrMaybeConstant(Register &ConstReg,
// Get constant integer value of the given ConstReg.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI);
+// Get constant integer value of the given ConstReg, sign-extended.
+int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI);
+
// Check if MI is a SPIR-V specific intrinsic call.
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID);
// Check if it's a SPIR-V specific intrinsic call.
diff --git a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
index 3090ad3..27fba34 100644
--- a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
@@ -407,6 +407,7 @@ bool X86InstructionSelector::select(MachineInstr &I) {
case TargetOpcode::G_TRUNC:
return selectTruncOrPtrToInt(I, MRI, MF);
case TargetOpcode::G_INTTOPTR:
+ case TargetOpcode::G_FREEZE:
return selectCopy(I, MRI);
case TargetOpcode::G_ZEXT:
return selectZext(I, MRI, MF);
diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
index e7709ef..11ef721 100644
--- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
+++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
@@ -89,9 +89,29 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
// 32/64-bits needs support for s64/s128 to handle cases:
// s64 = EXTEND (G_IMPLICIT_DEF s32) -> s64 = G_IMPLICIT_DEF
// s128 = EXTEND (G_IMPLICIT_DEF s32/s64) -> s128 = G_IMPLICIT_DEF
- getActionDefinitionsBuilder(G_IMPLICIT_DEF)
+ getActionDefinitionsBuilder(
+ {G_IMPLICIT_DEF, G_PHI, G_FREEZE, G_CONSTANT_FOLD_BARRIER})
.legalFor({p0, s1, s8, s16, s32, s64})
- .legalFor(Is64Bit, {s128});
+ .legalFor(UseX87, {s80})
+ .legalFor(Is64Bit, {s128})
+ .legalFor(HasSSE2, {v16s8, v8s16, v4s32, v2s64})
+ .legalFor(HasAVX, {v32s8, v16s16, v8s32, v4s64})
+ .legalFor(HasAVX512, {v64s8, v32s16, v16s32, v8s64})
+ .widenScalarOrEltToNextPow2(0, /*Min=*/8)
+ .clampScalarOrElt(0, s8, sMaxScalar)
+ .moreElementsToNextPow2(0)
+ .clampMinNumElements(0, s8, 16)
+ .clampMinNumElements(0, s16, 8)
+ .clampMinNumElements(0, s32, 4)
+ .clampMinNumElements(0, s64, 2)
+ .clampMaxNumElements(0, s8, HasAVX512 ? 64 : (HasAVX ? 32 : 16))
+ .clampMaxNumElements(0, s16, HasAVX512 ? 32 : (HasAVX ? 16 : 8))
+ .clampMaxNumElements(0, s32, HasAVX512 ? 16 : (HasAVX ? 8 : 4))
+ .clampMaxNumElements(0, s64, HasAVX512 ? 8 : (HasAVX ? 4 : 2))
+ .clampMaxNumElements(0, p0,
+ Is64Bit ? s64MaxVector.getNumElements()
+ : s32MaxVector.getNumElements())
+ .scalarizeIf(scalarOrEltWiderThan(0, 64), 0);
getActionDefinitionsBuilder(G_CONSTANT)
.legalFor({p0, s8, s16, s32})
@@ -289,26 +309,6 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
.clampScalar(1, s16, sMaxScalar)
.scalarSameSizeAs(0, 1);
- // control flow
- getActionDefinitionsBuilder(G_PHI)
- .legalFor({s8, s16, s32, p0})
- .legalFor(UseX87, {s80})
- .legalFor(Is64Bit, {s64})
- .legalFor(HasSSE1, {v16s8, v8s16, v4s32, v2s64})
- .legalFor(HasAVX, {v32s8, v16s16, v8s32, v4s64})
- .legalFor(HasAVX512, {v64s8, v32s16, v16s32, v8s64})
- .clampMinNumElements(0, s8, 16)
- .clampMinNumElements(0, s16, 8)
- .clampMinNumElements(0, s32, 4)
- .clampMinNumElements(0, s64, 2)
- .clampMaxNumElements(0, s8, HasAVX512 ? 64 : (HasAVX ? 32 : 16))
- .clampMaxNumElements(0, s16, HasAVX512 ? 32 : (HasAVX ? 16 : 8))
- .clampMaxNumElements(0, s32, HasAVX512 ? 16 : (HasAVX ? 8 : 4))
- .clampMaxNumElements(0, s64, HasAVX512 ? 8 : (HasAVX ? 4 : 2))
- .widenScalarToNextPow2(0, /*Min=*/32)
- .clampScalar(0, s8, sMaxScalar)
- .scalarize(0);
-
getActionDefinitionsBuilder(G_BRCOND).legalFor({s1});
// pointer handling
@@ -592,11 +592,6 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
.minScalar(0, LLT::scalar(32))
.libcall();
- getActionDefinitionsBuilder({G_FREEZE, G_CONSTANT_FOLD_BARRIER})
- .legalFor({s8, s16, s32, s64, p0})
- .widenScalarToNextPow2(0, /*Min=*/8)
- .clampScalar(0, s8, sMaxScalar);
-
getLegacyLegalizerInfo().computeTables();
verify(*STI.getInstrInfo());
}
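
The merged builder above applies one set of per-feature caps to all four opcodes; a minimal constexpr sketch (hypothetical helper, not an LLVM API) of where those clampMaxNumElements limits come from, e.g. an SSE2-only target splits a v32s8 G_FREEZE in half.

    // Mirrors the clampMaxNumElements ternaries above: the widest legal vector
    // is 128, 256 or 512 bits depending on the feature level, so the element
    // cap for a given scalar size follows directly.
    constexpr unsigned maxElements(unsigned ScalarBits, bool HasAVX, bool HasAVX512) {
      return (HasAVX512 ? 512u : (HasAVX ? 256u : 128u)) / ScalarBits;
    }

    static_assert(maxElements(8, false, false) == 16, "SSE2-only: v32s8 gets split");
    static_assert(maxElements(16, true, false) == 16, "AVX: 16 x s16 is the cap");
    static_assert(maxElements(64, false, true) == 8, "AVX-512: 8 x s64 is the cap");
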
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 564810c..83bd6ac 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -662,6 +662,7 @@ def VINSERTPSZrri : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
"vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, timm:$src3))]>,
EVEX, VVVV, Sched<[SchedWriteFShuffle.XMM]>;
+let mayLoad = 1 in
def VINSERTPSZrmi : AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
(ins VR128X:$src1, f32mem:$src2, u8imm:$src3),
"vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
@@ -1293,6 +1294,7 @@ multiclass avx512_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
SDPatternOperator OpNode,
X86VectorVTInfo _Dst,
X86VectorVTInfo _Src> {
+ let hasSideEffects = 0, mayLoad = 1 in
defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
(ins _Src.MemOp:$src), OpcodeStr, "$src", "$src",
(_Dst.VT (OpNode addr:$src))>,
@@ -1748,6 +1750,7 @@ let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
(_.VT (X86VPermt2 _.RC:$src1, IdxVT.RC:$src2, _.RC:$src3)), 1>,
EVEX, VVVV, AVX5128IBase, Sched<[sched]>;
+ let hasSideEffects = 0, mayLoad = 1 in
defm rm: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins IdxVT.RC:$src2, _.MemOp:$src3),
OpcodeStr, "$src3, $src2", "$src2, $src3",
@@ -1759,7 +1762,7 @@ let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
multiclass avx512_perm_t_mb<bits<8> opc, string OpcodeStr,
X86FoldableSchedWrite sched,
X86VectorVTInfo _, X86VectorVTInfo IdxVT> {
- let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in
+ let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain, hasSideEffects = 0, mayLoad = 1 in
defm rmb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins IdxVT.RC:$src2, _.ScalarMemOp:$src3),
OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
@@ -1987,6 +1990,7 @@ multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeSAE,
_.FRC:$src2,
timm:$cc))]>,
EVEX, VVVV, VEX_LIG, Sched<[sched]>, SIMD_EXC;
+ let mayLoad = 1 in
def rmi : AVX512Ii8<0xC2, MRMSrcMem,
(outs _.KRC:$dst),
(ins _.FRC:$src1, _.ScalarMemOp:$src2, u8imm:$cc),
@@ -2145,6 +2149,7 @@ multiclass avx512_icmp_cc<bits<8> opc, string Suffix, PatFrag Frag,
(_.VT _.RC:$src2),
cond)))]>,
EVEX, VVVV, Sched<[sched]>;
+ let mayLoad = 1 in
def rmi : AVX512AIi8<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, u8imm:$cc),
!strconcat("vpcmp", Suffix,
@@ -2167,6 +2172,7 @@ multiclass avx512_icmp_cc<bits<8> opc, string Suffix, PatFrag Frag,
(_.VT _.RC:$src2),
cond))))]>,
EVEX, VVVV, EVEX_K, Sched<[sched]>;
+ let mayLoad = 1 in
def rmik : AVX512AIi8<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
u8imm:$cc),
@@ -2198,6 +2204,7 @@ multiclass avx512_icmp_cc_rmb<bits<8> opc, string Suffix, PatFrag Frag,
PatFrag Frag_su, X86FoldableSchedWrite sched,
X86VectorVTInfo _, string Name> :
avx512_icmp_cc<opc, Suffix, Frag, Frag_su, sched, _, Name> {
+ let mayLoad = 1 in {
def rmbi : AVX512AIi8<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
u8imm:$cc),
@@ -2221,6 +2228,7 @@ multiclass avx512_icmp_cc_rmb<bits<8> opc, string Suffix, PatFrag Frag,
(_.BroadcastLdFrag addr:$src2),
cond))))]>,
EVEX, VVVV, EVEX_K, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>;
+ }
def : Pat<(_.KVT (Frag:$cc (_.BroadcastLdFrag addr:$src2),
(_.VT _.RC:$src1), cond)),
@@ -2305,6 +2313,7 @@ let Uses = [MXCSR], mayRaiseFPException = 1 in {
(X86cmpm_su (_.VT _.RC:$src1), (_.VT _.RC:$src2), timm:$cc),
1>, Sched<[sched]>;
+ let mayLoad = 1 in {
defm rmi : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
(outs _.KRC:$dst),(ins _.RC:$src1, _.MemOp:$src2, u8imm:$cc),
"vcmp"#_.Suffix,
@@ -2329,6 +2338,7 @@ let Uses = [MXCSR], mayRaiseFPException = 1 in {
timm:$cc)>,
EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>;
}
+ }
// Patterns for selecting with loads in other operand.
def : Pat<(X86any_cmpm (_.LdFrag addr:$src2), (_.VT _.RC:$src1),
@@ -3771,6 +3781,7 @@ def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src
[(set VR128X:$dst,
(v4i32 (scalar_to_vector GR32:$src)))]>,
EVEX, Sched<[WriteVecMoveFromGpr]>;
+let mayLoad = 1 in
def VMOVDI2PDIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src),
"vmovd\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst,
@@ -3874,7 +3885,7 @@ def VMOVSS2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst),
// Move Quadword Int to Packed Quadword Int
//
-let ExeDomain = SSEPackedInt in {
+let ExeDomain = SSEPackedInt, mayLoad = 1, hasSideEffects = 0 in {
def VMOVQI2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
(ins i64mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
@@ -3930,13 +3941,13 @@ multiclass avx512_move_scalar<string asm, SDNode OpNode, PatFrag vzload_frag,
(_.VT (OpNode _.RC:$src1, _.RC:$src2)),
(_.VT _.RC:$src0))))],
_.ExeDomain>, EVEX, VVVV, EVEX_K, Sched<[SchedWriteFShuffle.XMM]>;
- let canFoldAsLoad = 1, isReMaterializable = 1 in {
+ let canFoldAsLoad = 1, isReMaterializable = 1, mayLoad = 1, hasSideEffects = 0 in {
def rm : AVX512PI<0x10, MRMSrcMem, (outs _.RC:$dst), (ins _.ScalarMemOp:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
[(set _.RC:$dst, (_.VT (vzload_frag addr:$src)))],
_.ExeDomain>, EVEX, Sched<[WriteFLoad]>;
// _alt version uses FR32/FR64 register class.
- let isCodeGenOnly = 1 in
+ let isCodeGenOnly = 1, mayLoad = 1, hasSideEffects = 0 in
def rm_alt : AVX512PI<0x10, MRMSrcMem, (outs _.FRC:$dst), (ins _.ScalarMemOp:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
[(set _.FRC:$dst, (_.ScalarLdFrag addr:$src))],
@@ -4557,6 +4568,7 @@ let Predicates = [HasAVX512] in {
// AVX-512 - Non-temporals
//===----------------------------------------------------------------------===//
+let mayLoad = 1, hasSideEffects = 0 in {
def VMOVNTDQAZrm : AVX512PI<0x2A, MRMSrcMem, (outs VR512:$dst),
(ins i512mem:$src), "vmovntdqa\t{$src, $dst|$dst, $src}",
[], SSEPackedInt>, Sched<[SchedWriteVecMoveLSNT.ZMM.RM]>,
@@ -4575,11 +4587,12 @@ let Predicates = [HasVLX] in {
[], SSEPackedInt>, Sched<[SchedWriteVecMoveLSNT.XMM.RM]>,
EVEX, T8, PD, EVEX_V128, EVEX_CD8<64, CD8VF>;
}
+}
multiclass avx512_movnt<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
X86SchedWriteMoveLS Sched,
PatFrag st_frag = alignednontemporalstore> {
- let SchedRW = [Sched.MR], AddedComplexity = 400 in
+ let mayStore = 1, SchedRW = [Sched.MR], AddedComplexity = 400 in
def mr : AVX512PI<opc, MRMDestMem, (outs), (ins _.MemOp:$dst, _.RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(st_frag (_.VT _.RC:$src), addr:$dst)],
@@ -4682,6 +4695,7 @@ multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
IsCommutable, IsCommutable>, AVX512BIBase, EVEX, VVVV,
Sched<[sched]>;
+ let mayLoad = 1, hasSideEffects = 0 in
defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -4694,6 +4708,7 @@ multiclass avx512_binop_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _, X86FoldableSchedWrite sched,
bit IsCommutable = 0> :
avx512_binop_rm<opc, OpcodeStr, OpNode, _, sched, IsCommutable> {
+ let mayLoad = 1, hasSideEffects = 0 in
defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
"${src2}"#_.BroadcastStr#", $src1",
@@ -4811,6 +4826,7 @@ multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr,
(_Src.VT _Src.RC:$src2))),
IsCommutable>,
AVX512BIBase, EVEX, VVVV, Sched<[sched]>;
+ let mayLoad = 1, hasSideEffects = 0 in {
defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
(ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -4828,6 +4844,7 @@ multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr,
(_Brdct.VT (_Brdct.BroadcastLdFrag addr:$src2)))))>,
AVX512BIBase, EVEX, VVVV, EVEX_B,
Sched<[sched.Folded, sched.ReadAfterFold]>;
+ }
}
defm VPADD : avx512_binop_rm_vl_all<0xFC, 0xFD, 0xFE, 0xD4, "vpadd", add,
@@ -4893,6 +4910,7 @@ defm VPMULTISHIFTQB : avx512_binop_all<0x83, "vpmultishiftqb", SchedWriteVecALU,
multiclass avx512_packs_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _Src, X86VectorVTInfo _Dst,
X86FoldableSchedWrite sched> {
+ let mayLoad = 1, hasSideEffects = 0 in
defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
(ins _Src.RC:$src1, _Src.ScalarMemOp:$src2),
OpcodeStr,
@@ -4916,6 +4934,7 @@ multiclass avx512_packs_rm<bits<8> opc, string OpcodeStr,
(_Src.VT _Src.RC:$src2))),
IsCommutable, IsCommutable>,
EVEX_CD8<_Src.EltSize, CD8VF>, EVEX, VVVV, Sched<[sched]>;
+ let mayLoad = 1, hasSideEffects = 0 in
defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
(ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -5370,6 +5389,7 @@ multiclass avx512_fp_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
(_.VT (VecNode _.RC:$src1, _.RC:$src2)), "_Int">,
Sched<[sched]>;
+ let mayLoad = 1 in
defm rm : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.IntScalarMemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -5384,6 +5404,7 @@ multiclass avx512_fp_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
Sched<[sched]> {
let isCommutable = IsCommutable;
}
+ let mayLoad = 1 in
def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst),
(ins _.FRC:$src1, _.ScalarMemOp:$src2),
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
@@ -5414,6 +5435,7 @@ multiclass avx512_fp_scalar_sae<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
(_.VT (VecNode _.RC:$src1, _.RC:$src2)), "_Int">,
Sched<[sched]>, SIMD_EXC;
+ let mayLoad = 1 in
defm rm : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.IntScalarMemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -5430,6 +5452,7 @@ multiclass avx512_fp_scalar_sae<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
Sched<[sched]> {
let isCommutable = IsCommutable;
}
+ let mayLoad = 1 in
def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst),
(ins _.FRC:$src1, _.ScalarMemOp:$src2),
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
@@ -5509,6 +5532,7 @@ multiclass avx512_comutable_binop_s<bits<8> opc, string OpcodeStr,
Sched<[sched]> {
let isCommutable = 1;
}
+ let mayLoad = 1 in
def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst),
(ins _.FRC:$src1, _.ScalarMemOp:$src2),
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
@@ -5737,6 +5761,7 @@ multiclass avx512_fp_scalef_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1, _.RC:$src2))>,
EVEX, VVVV, Sched<[sched]>;
+ let mayLoad = 1 in {
defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.MemOp:$src2), OpcodeStr#_.Suffix,
"$src2, $src1", "$src1, $src2",
@@ -5749,6 +5774,7 @@ multiclass avx512_fp_scalef_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
(OpNode _.RC:$src1, (_.VT (_.BroadcastLdFrag addr:$src2)))>,
EVEX, VVVV, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>;
}
+ }
}
multiclass avx512_fp_scalef_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -5759,6 +5785,7 @@ multiclass avx512_fp_scalef_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1, _.RC:$src2))>,
Sched<[sched]>;
+ let mayLoad = 1 in
defm rm: AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.IntScalarMemOp:$src2), OpcodeStr#_.Suffix,
"$src2, $src1", "$src1, $src2",
@@ -5916,6 +5943,7 @@ multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1, (i8 timm:$src2)))>,
Sched<[sched]>;
+ let mayLoad = 1 in
defm mi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
(ins _.MemOp:$src1, u8imm:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -5928,7 +5956,7 @@ multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
multiclass avx512_shift_rmbi<bits<8> opc, Format ImmFormM,
string OpcodeStr, SDNode OpNode,
X86FoldableSchedWrite sched, X86VectorVTInfo _> {
- let ExeDomain = _.ExeDomain in
+ let ExeDomain = _.ExeDomain, mayLoad = 1 in
defm mbi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
(ins _.ScalarMemOp:$src1, u8imm:$src2), OpcodeStr,
"$src2, ${src1}"#_.BroadcastStr, "${src1}"#_.BroadcastStr#", $src2",
@@ -5946,6 +5974,7 @@ multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode OpNode,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1, (SrcVT VR128X:$src2)))>,
AVX512BIBase, EVEX, VVVV, Sched<[sched]>;
+ let mayLoad = 1 in
defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, i128mem:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -6095,6 +6124,7 @@ multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1, (_.VT _.RC:$src2)))>,
AVX5128IBase, EVEX, VVVV, Sched<[sched]>;
+ let mayLoad = 1 in
defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -6107,7 +6137,7 @@ multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
multiclass avx512_var_shift_mb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86FoldableSchedWrite sched, X86VectorVTInfo _> {
- let ExeDomain = _.ExeDomain in
+ let ExeDomain = _.ExeDomain, mayLoad = 1 in
defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
"${src2}"#_.BroadcastStr#", $src1",
@@ -6372,6 +6402,7 @@ multiclass avx512_permil_vec<bits<8> OpcVar, string OpcodeStr, SDNode OpNode,
(_.VT (OpNode _.RC:$src1,
(Ctrl.VT Ctrl.RC:$src2)))>,
T8, PD, EVEX, VVVV, Sched<[sched]>;
+ let mayLoad = 1 in {
defm rm: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, Ctrl.MemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -6389,6 +6420,7 @@ multiclass avx512_permil_vec<bits<8> OpcVar, string OpcodeStr, SDNode OpNode,
(Ctrl.VT (Ctrl.BroadcastLdFrag addr:$src2))))>,
T8, PD, EVEX, VVVV, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>,
Sched<[sched.Folded, sched.ReadAfterFold]>;
+ }
}
multiclass avx512_permil_vec_common<string OpcodeStr, bits<8> OpcVar,
@@ -7258,6 +7290,7 @@ let ExeDomain = DstVT.ExeDomain, Uses = _Uses,
(OpNode (DstVT.VT DstVT.RC:$src1), SrcRC:$src2))]>,
EVEX, VVVV, Sched<[sched, ReadDefault, ReadInt2Fpu]>;
+ let mayLoad = 1 in
def rm_Int : SI<opc, MRMSrcMem, (outs DstVT.RC:$dst),
(ins DstVT.RC:$src1, x86memop:$src2),
asm#"{"#mem#"}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
@@ -7400,6 +7433,7 @@ multiclass avx512_cvt_s_int_round<bits<8> opc, X86VectorVTInfo SrcVT,
[(set DstVT.RC:$dst, (OpNodeRnd (SrcVT.VT SrcVT.RC:$src),(i32 timm:$rc)))]>,
EVEX, VEX_LIG, EVEX_B, EVEX_RC,
Sched<[sched]>;
+ let mayLoad = 1 in
def rm_Int : SI<opc, MRMSrcMem, (outs DstVT.RC:$dst), (ins SrcVT.IntScalarMemOp:$src),
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
[(set DstVT.RC:$dst, (OpNode
@@ -7451,6 +7485,7 @@ multiclass avx512_cvt_s<bits<8> opc, string asm, X86VectorVTInfo SrcVT,
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
[(set DstVT.RC:$dst, (OpNode SrcVT.FRC:$src))]>,
EVEX, VEX_LIG, Sched<[sched]>, SIMD_EXC;
+ let mayLoad = 1 in
def rm : AVX512<opc, MRMSrcMem, (outs DstVT.RC:$dst), (ins SrcVT.ScalarMemOp:$src),
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
[(set DstVT.RC:$dst, (OpNode (SrcVT.ScalarLdFrag addr:$src)))]>,
@@ -7572,6 +7607,7 @@ let Predicates = [prd], ExeDomain = _SrcRC.ExeDomain in {
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
[(set _DstRC.RC:$dst, (OpNode _SrcRC.FRC:$src))]>,
EVEX, VEX_LIG, Sched<[sched]>, SIMD_EXC;
+ let mayLoad = 1 in
def rm : AVX512<opc, MRMSrcMem, (outs _DstRC.RC:$dst), (ins _SrcRC.ScalarMemOp:$src),
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
[(set _DstRC.RC:$dst, (OpNode (_SrcRC.ScalarLdFrag addr:$src)))]>,
@@ -7587,6 +7623,7 @@ let Predicates = [prd], ExeDomain = _SrcRC.ExeDomain in {
!strconcat(asm,"\t{{sae}, $src, $dst|$dst, $src, {sae}}"),
[(set _DstRC.RC:$dst, (OpNodeSAE (_SrcRC.VT _SrcRC.RC:$src)))]>,
EVEX, VEX_LIG, EVEX_B, Sched<[sched]>;
+ let mayLoad = 1 in
def rm_Int : AVX512<opc, MRMSrcMem, (outs _DstRC.RC:$dst),
(ins _SrcRC.IntScalarMemOp:$src),
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
@@ -7644,6 +7681,7 @@ multiclass avx512_cvt_fp_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _
(_.VT (OpNode (_.VT _.RC:$src1),
(_Src.VT _Src.RC:$src2))), "_Int">,
EVEX, VVVV, VEX_LIG, Sched<[sched]>;
+ let mayLoad = 1 in
defm rm : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _Src.IntScalarMemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -7807,6 +7845,7 @@ let Uses = [MXCSR], mayRaiseFPException = 1 in {
_.ImmAllZerosV)>,
EVEX, Sched<[sched]>;
+ let mayLoad = 1 in {
defm rm : AVX512_maskable_cvt<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins MemOp:$src),
(ins _.RC:$src0, MaskRC:$mask, MemOp:$src),
@@ -7840,6 +7879,7 @@ let Uses = [MXCSR], mayRaiseFPException = 1 in {
_.ImmAllZerosV)>,
EVEX, EVEX_B, Sched<[sched.Folded]>;
}
+ }
}
// Conversion with SAE - suppress all exceptions
multiclass avx512_vcvt_fp_sae<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
@@ -8944,6 +8984,7 @@ multiclass avx512_cvtph2ps<X86VectorVTInfo _dest, X86VectorVTInfo _src,
(X86any_cvtph2ps (_src.VT _src.RC:$src)),
(X86cvtph2ps (_src.VT _src.RC:$src))>,
T8, PD, Sched<[sched]>;
+ let mayLoad = 1 in
defm rm : AVX512_maskable_split<0x13, MRMSrcMem, _dest, (outs _dest.RC:$dst),
(ins x86memop:$src), "vcvtph2ps", "$src", "$src",
(X86any_cvtph2ps (_src.VT ld_dag)),
@@ -9161,6 +9202,7 @@ multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
"$src2, $src1", "$src1, $src2",
(OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))>,
EVEX, VVVV, VEX_LIG, Sched<[sched]>;
+ let mayLoad = 1 in
defm rm : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.IntScalarMemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -9621,6 +9663,7 @@ multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr,
(i32 timm:$src3))), "_Int">, EVEX_B,
Sched<[sched]>;
+ let mayLoad = 1 in
defm rmi : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.IntScalarMemOp:$src2, i32u8imm:$src3),
OpcodeStr,
@@ -9999,6 +10042,7 @@ multiclass avx512_pmovx_common<bits<8> opc, string OpcodeStr, X86FoldableSchedWr
(DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src)))>,
EVEX, Sched<[sched]>;
+ let mayLoad = 1 in
defm rm : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
(ins x86memop:$src), OpcodeStr ,"$src", "$src",
(DestInfo.VT (LdFrag addr:$src))>,
@@ -10601,6 +10645,7 @@ multiclass expand_by_vec_width<bits<8> opc, X86VectorVTInfo _,
(null_frag)>, AVX5128IBase,
Sched<[sched]>;
+ let mayLoad = 1 in
defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.MemOp:$src1), OpcodeStr, "$src1", "$src1",
(null_frag)>,
@@ -10673,6 +10718,7 @@ multiclass avx512_unary_fp_packed_imm<bits<8> opc, string OpcodeStr,
(OpNode (_.VT _.RC:$src1), (i32 timm:$src2)),
(MaskOpNode (_.VT _.RC:$src1), (i32 timm:$src2))>,
Sched<[sched]>;
+ let mayLoad = 1 in {
defm rmi : AVX512_maskable_split<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.MemOp:$src1, i32u8imm:$src2),
OpcodeStr#_.Suffix, "$src2, $src1", "$src1, $src2",
@@ -10691,6 +10737,7 @@ multiclass avx512_unary_fp_packed_imm<bits<8> opc, string OpcodeStr,
(i32 timm:$src2))>, EVEX_B,
Sched<[sched.Folded, sched.ReadAfterFold]>;
}
+ }
}
//handle instruction reg_vec1 = op(reg_vec2,reg_vec3,imm),{sae}
@@ -10739,6 +10786,7 @@ multiclass avx512_fp_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(_.VT _.RC:$src2),
(i32 timm:$src3))>,
Sched<[sched]>;
+ let mayLoad = 1 in {
defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3),
OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
@@ -10755,6 +10803,7 @@ multiclass avx512_fp_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(i32 timm:$src3))>, EVEX_B,
Sched<[sched.Folded, sched.ReadAfterFold]>;
}
+ }
}
//handle instruction reg_vec1 = op(reg_vec2,reg_vec3,imm)
@@ -10770,6 +10819,7 @@ multiclass avx512_3Op_rm_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode,
(SrcInfo.VT SrcInfo.RC:$src2),
(i8 timm:$src3)))>,
Sched<[sched]>;
+ let mayLoad = 1 in
defm rmi : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
(ins SrcInfo.RC:$src1, SrcInfo.MemOp:$src2, u8imm:$src3),
OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
@@ -10788,7 +10838,7 @@ multiclass avx512_3Op_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86FoldableSchedWrite sched, X86VectorVTInfo _>:
avx512_3Op_rm_imm8<opc, OpcodeStr, OpNode, sched, _, _>{
- let ExeDomain = _.ExeDomain, ImmT = Imm8 in
+ let ExeDomain = _.ExeDomain, ImmT = Imm8, mayLoad = 1 in
defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
OpcodeStr, "$src3, ${src2}"#_.BroadcastStr#", $src1",
@@ -10811,6 +10861,7 @@ multiclass avx512_fp_scalar_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(_.VT _.RC:$src2),
(i32 timm:$src3))>,
Sched<[sched]>;
+ let mayLoad = 1 in
defm rmi : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.IntScalarMemOp:$src2, i32u8imm:$src3),
OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
@@ -10979,6 +11030,7 @@ multiclass avx512_shuff_packed_128_common<bits<8> opc, string OpcodeStr,
(CastInfo.VT (X86Shuf128 _.RC:$src1, _.RC:$src2,
(i8 timm:$src3)))))>,
Sched<[sched]>;
+ let mayLoad = 1 in {
defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.MemOp:$src2, u8imm:$src3),
OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
@@ -11000,6 +11052,7 @@ multiclass avx512_shuff_packed_128_common<bits<8> opc, string OpcodeStr,
(i8 timm:$src3)))))>, EVEX_B,
Sched<[sched.Folded, sched.ReadAfterFold]>;
}
+ }
}
multiclass avx512_shuff_packed_128<string OpcodeStr, X86FoldableSchedWrite sched,
@@ -11031,6 +11084,7 @@ multiclass avx512_valign<bits<8> opc, string OpcodeStr,
OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
(_.VT (X86VAlign _.RC:$src1, _.RC:$src2, (i8 timm:$src3)))>,
Sched<[sched]>;
+ let mayLoad = 1 in {
defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.MemOp:$src2, u8imm:$src3),
OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
@@ -11048,6 +11102,7 @@ multiclass avx512_valign<bits<8> opc, string OpcodeStr,
(i8 timm:$src3))>, EVEX_B,
Sched<[sched.Folded, sched.ReadAfterFold]>;
}
+ }
}
multiclass avx512_valign_common<string OpcodeStr, X86SchedWriteWidths sched,
@@ -11202,6 +11257,7 @@ multiclass avx512_unary_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(_.VT (OpNode (_.VT _.RC:$src1)))>, EVEX, AVX5128IBase,
Sched<[sched]>;
+ let mayLoad = 1 in
defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.MemOp:$src1), OpcodeStr,
"$src1", "$src1",
@@ -11214,6 +11270,7 @@ multiclass avx512_unary_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
multiclass avx512_unary_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86FoldableSchedWrite sched, X86VectorVTInfo _> :
avx512_unary_rm<opc, OpcodeStr, OpNode, sched, _> {
+ let mayLoad = 1 in
defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.ScalarMemOp:$src1), OpcodeStr,
"${src1}"#_.BroadcastStr,
@@ -11368,6 +11425,7 @@ multiclass avx512_movddup_128<bits<8> opc, string OpcodeStr,
(ins _.RC:$src), OpcodeStr, "$src", "$src",
(_.VT (X86VBroadcast (_.VT _.RC:$src)))>, EVEX,
Sched<[sched]>;
+ let mayLoad = 1 in
defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.ScalarMemOp:$src), OpcodeStr, "$src", "$src",
(_.VT (_.BroadcastLdFrag addr:$src))>,
@@ -11513,6 +11571,7 @@ defm VPEXTRQZ : avx512_extract_elt_dq<"vpextrq", v2i64x_info, GR64>, REX_W;
multiclass avx512_insert_elt_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _, PatFrag LdFrag,
SDPatternOperator immoperator> {
+ let mayLoad = 1 in
def rmi : AVX512Ii8<opc, MRMSrcMem, (outs _.RC:$dst),
(ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
@@ -11650,6 +11709,7 @@ multiclass avx512_psadbw_packed<bits<8> opc, SDNode OpNode,
(OpNode (_src.VT _src.RC:$src1),
(_src.VT _src.RC:$src2))))]>,
Sched<[sched]>;
+ let mayLoad = 1 in
def rm : AVX512BI<opc, MRMSrcMem,
(outs _dst.RC:$dst), (ins _src.RC:$src1, _src.MemOp:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -11751,6 +11811,7 @@ multiclass avx512_ternlog<bits<8> opc, string OpcodeStr, SDNode OpNode,
(_.VT _.RC:$src3),
(i8 timm:$src4)), 1, 1>,
AVX512AIi8Base, EVEX, VVVV, Sched<[sched]>;
+ let mayLoad = 1 in {
defm rmi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src2, _.MemOp:$src3, u8imm:$src4),
OpcodeStr, "$src4, $src3, $src2", "$src2, $src3, $src4",
@@ -11770,6 +11831,7 @@ multiclass avx512_ternlog<bits<8> opc, string OpcodeStr, SDNode OpNode,
(i8 timm:$src4)), 1, 0>, EVEX_B,
AVX512AIi8Base, EVEX, VVVV, EVEX_CD8<_.EltSize, CD8VF>,
Sched<[sched.Folded, sched.ReadAfterFold]>;
+ }
}// Constraints = "$src1 = $dst"
// Additional patterns for matching passthru operand in other positions.
@@ -12016,6 +12078,7 @@ multiclass avx512_fixupimm_packed<bits<8> opc, string OpcodeStr,
(_.VT _.RC:$src2),
(TblVT.VT _.RC:$src3),
(i32 timm:$src4))>, Sched<[sched]>;
+ let mayLoad = 1 in {
defm rmi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src2, _.MemOp:$src3, i32u8imm:$src4),
OpcodeStr#_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
@@ -12033,6 +12096,7 @@ multiclass avx512_fixupimm_packed<bits<8> opc, string OpcodeStr,
(TblVT.VT (TblVT.BroadcastLdFrag addr:$src3)),
(i32 timm:$src4))>,
EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>;
+ }
} // Constraints = "$src1 = $dst"
}
@@ -12075,6 +12139,7 @@ multiclass avx512_fixupimm_scalar<bits<8> opc, string OpcodeStr,
(_src3VT.VT _src3VT.RC:$src3),
(i32 timm:$src4))>,
EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>;
+ let mayLoad = 1 in
defm rmi : AVX512_maskable_3src_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src2, _.ScalarMemOp:$src3, i32u8imm:$src4),
OpcodeStr#_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
@@ -12417,6 +12482,7 @@ multiclass VNNI_rmb<bits<8> Op, string OpStr, SDNode OpNode,
VTI.RC:$src2, VTI.RC:$src3)),
IsCommutable, IsCommutable>,
EVEX, VVVV, T8, Sched<[sched]>;
+ let mayLoad = 1 in {
defm rm : AVX512_maskable_3src<Op, MRMSrcMem, VTI, (outs VTI.RC:$dst),
(ins VTI.RC:$src2, VTI.MemOp:$src3), OpStr,
"$src3, $src2", "$src2, $src3",
@@ -12435,6 +12501,7 @@ multiclass VNNI_rmb<bits<8> Op, string OpStr, SDNode OpNode,
T8, Sched<[sched.Folded, sched.ReadAfterFold,
sched.ReadAfterFold]>;
}
+ }
}
multiclass VNNI_common<bits<8> Op, string OpStr, SDNode OpNode,
@@ -12508,6 +12575,7 @@ multiclass VPSHUFBITQMB_rm<X86FoldableSchedWrite sched, X86VectorVTInfo VTI> {
(X86Vpshufbitqmb_su (VTI.VT VTI.RC:$src1),
(VTI.VT VTI.RC:$src2))>, EVEX, VVVV, T8, PD,
Sched<[sched]>;
+ let mayLoad = 1 in
defm rm : AVX512_maskable_cmp<0x8F, MRMSrcMem, VTI, (outs VTI.KRC:$dst),
(ins VTI.RC:$src1, VTI.MemOp:$src2),
"vpshufbitqmb",
@@ -12557,7 +12625,7 @@ multiclass GF2P8AFFINE_avx512_rmb_imm<bits<8> Op, string OpStr, SDNode OpNode,
X86FoldableSchedWrite sched, X86VectorVTInfo VTI,
X86VectorVTInfo BcstVTI>
: avx512_3Op_rm_imm8<Op, OpStr, OpNode, sched, VTI, VTI> {
- let ExeDomain = VTI.ExeDomain in
+ let ExeDomain = VTI.ExeDomain, mayLoad = 1 in
defm rmbi : AVX512_maskable<Op, MRMSrcMem, VTI, (outs VTI.RC:$dst),
(ins VTI.RC:$src1, BcstVTI.ScalarMemOp:$src2, u8imm:$src3),
OpStr, "$src3, ${src2}"#BcstVTI.BroadcastStr#", $src1",
@@ -12660,6 +12728,7 @@ multiclass avx512_vp2intersect_modes<X86FoldableSchedWrite sched, X86VectorVTInf
_.RC:$src1, (_.VT _.RC:$src2)))]>,
EVEX, VVVV, T8, XD, Sched<[sched]>;
+ let mayLoad = 1 in {
def rm : I<0x68, MRMSrcMem,
(outs _.KRPC:$dst),
(ins _.RC:$src1, _.MemOp:$src2),
@@ -12679,6 +12748,7 @@ multiclass avx512_vp2intersect_modes<X86FoldableSchedWrite sched, X86VectorVTInf
_.RC:$src1, (_.VT (_.BroadcastLdFrag addr:$src2))))]>,
EVEX, VVVV, T8, XD, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>,
Sched<[sched.Folded, sched.ReadAfterFold]>;
+ }
}
multiclass avx512_vp2intersect<X86SchedWriteWidths sched, AVX512VLVectorVTInfo _> {
@@ -12882,6 +12952,7 @@ let Predicates = [HasFP16] in {
// Move word ( r/m16) to Packed word
def VMOVW2SHrr : AVX512<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
"vmovw\t{$src, $dst|$dst, $src}", []>, T_MAP5, PD, EVEX, Sched<[WriteVecMoveFromGpr]>;
+let mayLoad = 1 in
def VMOVWrm : AVX512<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i16mem:$src),
"vmovw\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst,
@@ -13607,6 +13678,7 @@ multiclass avx512_cfmbinop_sh_common<bits<8> opc, string OpcodeStr, SDNode OpNod
(v4f32 (OpNode VR128X:$src1, VR128X:$src2)),
IsCommutable, IsCommutable, IsCommutable,
X86selects, "@earlyclobber $dst">, Sched<[WriteFMAX]>;
+ let mayLoad = 1 in
defm rm : AVX512_maskable<opc, MRMSrcMem, f32x_info, (outs VR128X:$dst),
(ins VR128X:$src1, ssmem:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
diff --git a/llvm/lib/TargetParser/TargetParser.cpp b/llvm/lib/TargetParser/TargetParser.cpp
index 34b09b1..b906690 100644
--- a/llvm/lib/TargetParser/TargetParser.cpp
+++ b/llvm/lib/TargetParser/TargetParser.cpp
@@ -444,6 +444,7 @@ static void fillAMDGCNFeatureMap(StringRef GPU, const Triple &T,
Features["atomic-fmin-fmax-global-f32"] = true;
Features["atomic-fmin-fmax-global-f64"] = true;
Features["wavefrontsize32"] = true;
+ Features["cluster"] = true;
break;
case GK_GFX1201:
case GK_GFX1200:
diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
index faeab95..cfdfd94 100644
--- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
+++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
@@ -3986,6 +3986,7 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::identifyClones(
void ModuleCallsiteContextGraph::updateAllocationCall(
CallInfo &Call, AllocationType AllocType) {
std::string AllocTypeString = getAllocTypeAttributeString(AllocType);
+ removeAnyExistingAmbiguousAttribute(cast<CallBase>(Call.call()));
auto A = llvm::Attribute::get(Call.call()->getFunction()->getContext(),
"memprof", AllocTypeString);
cast<CallBase>(Call.call())->addFnAttr(A);
@@ -5661,9 +5662,10 @@ bool MemProfContextDisambiguation::applyImport(Module &M) {
auto *MemProfMD = I.getMetadata(LLVMContext::MD_memprof);
// Include allocs that were already assigned a memprof function
- // attribute in the statistics.
- if (CB->getAttributes().hasFnAttr("memprof")) {
- assert(!MemProfMD);
+ // attribute in the statistics. Only do this for those that do not have
+ // memprof metadata, since we add an "ambiguous" memprof attribute by
+ // default.
+ if (CB->getAttributes().hasFnAttr("memprof") && !MemProfMD) {
CB->getAttributes().getFnAttr("memprof").getValueAsString() == "cold"
? AllocTypeColdThinBackend++
: AllocTypeNotColdThinBackend++;
@@ -5740,6 +5742,7 @@ bool MemProfContextDisambiguation::applyImport(Module &M) {
// clone J-1 (J==0 is the original clone and does not have a VMaps
// entry).
CBClone = cast<CallBase>((*VMaps[J - 1])[CB]);
+ removeAnyExistingAmbiguousAttribute(CBClone);
CBClone->addFnAttr(A);
ORE.emit(OptimizationRemark(DEBUG_TYPE, "MemprofAttribute", CBClone)
<< ore::NV("AllocationCall", CBClone) << " in clone "
diff --git a/llvm/lib/Transforms/Utils/SCCPSolver.cpp b/llvm/lib/Transforms/Utils/SCCPSolver.cpp
index af216cd..9693ae6 100644
--- a/llvm/lib/Transforms/Utils/SCCPSolver.cpp
+++ b/llvm/lib/Transforms/Utils/SCCPSolver.cpp
@@ -317,24 +317,29 @@ static Value *simplifyInstruction(SCCPSolver &Solver,
// Early exit if we know nothing about X.
if (LRange.isFullSet())
return nullptr;
- // We are allowed to refine the comparison to either true or false for out
- // of range inputs. Here we refine the comparison to true, i.e. we relax
- // the range check.
- auto NewCR = CR->exactUnionWith(LRange.inverse());
- // TODO: Check if we can narrow the range check to an equality test.
- // E.g, for X in [0, 4), X - 3 u< 2 -> X == 3
- if (!NewCR)
+ auto ConvertCRToICmp =
+ [&](const std::optional<ConstantRange> &NewCR) -> Value * {
+ ICmpInst::Predicate Pred;
+ APInt RHS;
+ // Check if we can represent NewCR as an icmp predicate.
+ if (NewCR && NewCR->getEquivalentICmp(Pred, RHS)) {
+ IRBuilder<NoFolder> Builder(&Inst);
+ Value *NewICmp =
+ Builder.CreateICmp(Pred, X, ConstantInt::get(X->getType(), RHS));
+ InsertedValues.insert(NewICmp);
+ return NewICmp;
+ }
return nullptr;
- ICmpInst::Predicate Pred;
- APInt RHS;
- // Check if we can represent NewCR as an icmp predicate.
- if (NewCR->getEquivalentICmp(Pred, RHS)) {
- IRBuilder<NoFolder> Builder(&Inst);
- Value *NewICmp =
- Builder.CreateICmp(Pred, X, ConstantInt::get(X->getType(), RHS));
- InsertedValues.insert(NewICmp);
- return NewICmp;
- }
+ };
+ // We are allowed to refine the comparison to either true or false for out
+ // of range inputs.
+ // Here we refine the comparison to false, and check if we can narrow the
+ // range check to a simpler test.
+ if (auto *V = ConvertCRToICmp(CR->exactIntersectWith(LRange)))
+ return V;
+ // Here we refine the comparison to true, i.e. we relax the range check.
+ if (auto *V = ConvertCRToICmp(CR->exactUnionWith(LRange.inverse())))
+ return V;
}
}
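To make the refinement above concrete: the previous code only relaxed the range check (refining the comparison to true for out-of-range inputs), while the new code first tries to narrow it by intersecting with the known range of X. A standalone sketch, not part of the patch, using the example from the removed TODO — for X known to be in [0, 4), the check X - 3 u< 2 holds exactly for X in [3, 5), and intersecting with [0, 4) leaves only 3, so the whole check narrows to X == 3:

    #include <cassert>
    #include <cstdint>

    // Illustrative only: rangeCheck models the original comparison and
    // narrowedCheck the equality test it can be narrowed to when x is known
    // to lie in [0, 4).
    static bool rangeCheck(uint32_t x) { return (x - 3u) < 2u; }
    static bool narrowedCheck(uint32_t x) { return x == 3u; }

    int main() {
      for (uint32_t x = 0; x < 4; ++x) // every value in the known range [0, 4)
        assert(rangeCheck(x) == narrowedCheck(x));
      return 0;
    }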
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 48055ad..148bfa8 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -5734,15 +5734,66 @@ bool SimplifyCFGOpt::simplifyUnreachable(UnreachableInst *UI) {
return Changed;
}
-static bool casesAreContiguous(SmallVectorImpl<ConstantInt *> &Cases) {
+struct ContiguousCasesResult {
+ ConstantInt *Min;
+ ConstantInt *Max;
+ BasicBlock *Dest;
+ BasicBlock *OtherDest;
+ SmallVectorImpl<ConstantInt *> *Cases;
+ SmallVectorImpl<ConstantInt *> *OtherCases;
+};
+
+static std::optional<ContiguousCasesResult>
+findContiguousCases(Value *Condition, SmallVectorImpl<ConstantInt *> &Cases,
+ SmallVectorImpl<ConstantInt *> &OtherCases,
+ BasicBlock *Dest, BasicBlock *OtherDest) {
assert(Cases.size() >= 1);
array_pod_sort(Cases.begin(), Cases.end(), constantIntSortPredicate);
- for (size_t I = 1, E = Cases.size(); I != E; ++I) {
- if (Cases[I - 1]->getValue() != Cases[I]->getValue() + 1)
- return false;
+ const APInt &Min = Cases.back()->getValue();
+ const APInt &Max = Cases.front()->getValue();
+ APInt Offset = Max - Min;
+ size_t ContiguousOffset = Cases.size() - 1;
+ if (Offset == ContiguousOffset) {
+ return ContiguousCasesResult{
+ /*Min=*/Cases.back(),
+ /*Max=*/Cases.front(),
+ /*Dest=*/Dest,
+ /*OtherDest=*/OtherDest,
+ /*Cases=*/&Cases,
+ /*OtherCases=*/&OtherCases,
+ };
}
- return true;
+ ConstantRange CR = computeConstantRange(Condition, /*ForSigned=*/false);
+  // If the cases form a wrapping contiguous range [Min, OtherMin] + [OtherMax,
+  // Max] (equivalently the wrap-around range [OtherMax, OtherMin]), then
+  // [OtherMin+1, OtherMax-1] is a contiguous range for the other destination.
+  // N.B. if CR is not a full range, Max+1 != Min, so the ends do not join up.
+ if (Max == CR.getUnsignedMax() && Min == CR.getUnsignedMin()) {
+ assert(Cases.size() >= 2);
+ auto *It =
+ std::adjacent_find(Cases.begin(), Cases.end(), [](auto L, auto R) {
+ return L->getValue() != R->getValue() + 1;
+ });
+ if (It == Cases.end())
+ return std::nullopt;
+ auto [OtherMax, OtherMin] = std::make_pair(*It, *std::next(It));
+ if ((Max - OtherMax->getValue()) + (OtherMin->getValue() - Min) ==
+ Cases.size() - 2) {
+ return ContiguousCasesResult{
+ /*Min=*/cast<ConstantInt>(
+ ConstantInt::get(OtherMin->getType(), OtherMin->getValue() + 1)),
+ /*Max=*/
+ cast<ConstantInt>(
+ ConstantInt::get(OtherMax->getType(), OtherMax->getValue() - 1)),
+ /*Dest=*/OtherDest,
+ /*OtherDest=*/Dest,
+ /*Cases=*/&OtherCases,
+ /*OtherCases=*/&Cases,
+ };
+ }
+ }
+ return std::nullopt;
}
static void createUnreachableSwitchDefault(SwitchInst *Switch,
@@ -5779,7 +5830,6 @@ bool SimplifyCFGOpt::turnSwitchRangeIntoICmp(SwitchInst *SI,
bool HasDefault = !SI->defaultDestUnreachable();
auto *BB = SI->getParent();
-
// Partition the cases into two sets with different destinations.
BasicBlock *DestA = HasDefault ? SI->getDefaultDest() : nullptr;
BasicBlock *DestB = nullptr;
@@ -5813,37 +5863,62 @@ bool SimplifyCFGOpt::turnSwitchRangeIntoICmp(SwitchInst *SI,
assert(!CasesA.empty() || HasDefault);
// Figure out if one of the sets of cases form a contiguous range.
- SmallVectorImpl<ConstantInt *> *ContiguousCases = nullptr;
- BasicBlock *ContiguousDest = nullptr;
- BasicBlock *OtherDest = nullptr;
- if (!CasesA.empty() && casesAreContiguous(CasesA)) {
- ContiguousCases = &CasesA;
- ContiguousDest = DestA;
- OtherDest = DestB;
- } else if (casesAreContiguous(CasesB)) {
- ContiguousCases = &CasesB;
- ContiguousDest = DestB;
- OtherDest = DestA;
- } else
- return false;
+ std::optional<ContiguousCasesResult> ContiguousCases;
+
+ // Only one icmp is needed when there is only one case.
+ if (!HasDefault && CasesA.size() == 1)
+ ContiguousCases = ContiguousCasesResult{
+ /*Min=*/CasesA[0],
+ /*Max=*/CasesA[0],
+ /*Dest=*/DestA,
+ /*OtherDest=*/DestB,
+ /*Cases=*/&CasesA,
+ /*OtherCases=*/&CasesB,
+ };
+ else if (CasesB.size() == 1)
+ ContiguousCases = ContiguousCasesResult{
+ /*Min=*/CasesB[0],
+ /*Max=*/CasesB[0],
+ /*Dest=*/DestB,
+ /*OtherDest=*/DestA,
+ /*Cases=*/&CasesB,
+ /*OtherCases=*/&CasesA,
+ };
+  // Correctness: cases that go to the default destination cannot be the contiguous cases.
+ else if (!HasDefault)
+ ContiguousCases =
+ findContiguousCases(SI->getCondition(), CasesA, CasesB, DestA, DestB);
- // Start building the compare and branch.
+ if (!ContiguousCases)
+ ContiguousCases =
+ findContiguousCases(SI->getCondition(), CasesB, CasesA, DestB, DestA);
- Constant *Offset = ConstantExpr::getNeg(ContiguousCases->back());
- Constant *NumCases =
- ConstantInt::get(Offset->getType(), ContiguousCases->size());
+ if (!ContiguousCases)
+ return false;
- Value *Sub = SI->getCondition();
- if (!Offset->isNullValue())
- Sub = Builder.CreateAdd(Sub, Offset, Sub->getName() + ".off");
+ auto [Min, Max, Dest, OtherDest, Cases, OtherCases] = *ContiguousCases;
- Value *Cmp;
+ // Start building the compare and branch.
+
+ Constant *Offset = ConstantExpr::getNeg(Min);
+ Constant *NumCases = ConstantInt::get(Offset->getType(),
+ Max->getValue() - Min->getValue() + 1);
+ BranchInst *NewBI;
+ if (NumCases->isOneValue()) {
+ assert(Max->getValue() == Min->getValue());
+ Value *Cmp = Builder.CreateICmpEQ(SI->getCondition(), Min);
+ NewBI = Builder.CreateCondBr(Cmp, Dest, OtherDest);
+ }
// If NumCases overflowed, then all possible values jump to the successor.
- if (NumCases->isNullValue() && !ContiguousCases->empty())
- Cmp = ConstantInt::getTrue(SI->getContext());
- else
- Cmp = Builder.CreateICmpULT(Sub, NumCases, "switch");
- BranchInst *NewBI = Builder.CreateCondBr(Cmp, ContiguousDest, OtherDest);
+ else if (NumCases->isNullValue() && !Cases->empty()) {
+ NewBI = Builder.CreateBr(Dest);
+ } else {
+ Value *Sub = SI->getCondition();
+ if (!Offset->isNullValue())
+ Sub = Builder.CreateAdd(Sub, Offset, Sub->getName() + ".off");
+ Value *Cmp = Builder.CreateICmpULT(Sub, NumCases, "switch");
+ NewBI = Builder.CreateCondBr(Cmp, Dest, OtherDest);
+ }
// Update weight for the newly-created conditional branch.
if (hasBranchWeightMD(*SI)) {
@@ -5853,7 +5928,7 @@ bool SimplifyCFGOpt::turnSwitchRangeIntoICmp(SwitchInst *SI,
uint64_t TrueWeight = 0;
uint64_t FalseWeight = 0;
for (size_t I = 0, E = Weights.size(); I != E; ++I) {
- if (SI->getSuccessor(I) == ContiguousDest)
+ if (SI->getSuccessor(I) == Dest)
TrueWeight += Weights[I];
else
FalseWeight += Weights[I];
@@ -5868,15 +5943,15 @@ bool SimplifyCFGOpt::turnSwitchRangeIntoICmp(SwitchInst *SI,
}
// Prune obsolete incoming values off the successors' PHI nodes.
- for (auto BBI = ContiguousDest->begin(); isa<PHINode>(BBI); ++BBI) {
- unsigned PreviousEdges = ContiguousCases->size();
- if (ContiguousDest == SI->getDefaultDest())
+ for (auto BBI = Dest->begin(); isa<PHINode>(BBI); ++BBI) {
+ unsigned PreviousEdges = Cases->size();
+ if (Dest == SI->getDefaultDest())
++PreviousEdges;
for (unsigned I = 0, E = PreviousEdges - 1; I != E; ++I)
cast<PHINode>(BBI)->removeIncomingValue(SI->getParent());
}
for (auto BBI = OtherDest->begin(); isa<PHINode>(BBI); ++BBI) {
- unsigned PreviousEdges = SI->getNumCases() - ContiguousCases->size();
+ unsigned PreviousEdges = OtherCases->size();
if (OtherDest == SI->getDefaultDest())
++PreviousEdges;
for (unsigned I = 0, E = PreviousEdges - 1; I != E; ++I)
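The wrapping-range case handled by findContiguousCases above can be illustrated with a small standalone sketch (not part of the patch; the helper name wrappedComplement and the plain-integer types are simplifications of the ConstantInt/APInt code). Suppose the switch condition is known to lie in [0, 6) and the cases {5, 1, 0} branch to one destination while {2, 3, 4} branch to the other: the first set touches both ends of the known range and has a single gap, so its complement [2, 4] is contiguous and the switch can still be turned into one unsigned comparison. The single-case and overflow fast paths added to turnSwitchRangeIntoICmp are not modeled here.

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <optional>
    #include <utility>
    #include <vector>

    // Illustrative helper: given one destination's case values sorted in
    // descending order and the known unsigned range [KnownMin, KnownMax] of
    // the switch condition, return the contiguous interval [Lo, Hi] covered
    // by the *other* destination when the cases wrap around the ends of the
    // known range. Mirrors the arithmetic in findContiguousCases().
    static std::optional<std::pair<uint64_t, uint64_t>>
    wrappedComplement(const std::vector<uint64_t> &CasesDescending,
                      uint64_t KnownMin, uint64_t KnownMax) {
      assert(!CasesDescending.empty());
      if (CasesDescending.front() != KnownMax ||
          CasesDescending.back() != KnownMin)
        return std::nullopt; // cases do not touch both ends, no wrap-around
      auto It = std::adjacent_find(
          CasesDescending.begin(), CasesDescending.end(),
          [](uint64_t L, uint64_t R) { return L != R + 1; });
      if (It == CasesDescending.end())
        return std::nullopt; // already contiguous, nothing left over
      uint64_t OtherMax = *It, OtherMin = *std::next(It);
      // Require exactly one gap, i.e. both halves are themselves contiguous.
      if ((KnownMax - OtherMax) + (OtherMin - KnownMin) !=
          CasesDescending.size() - 2)
        return std::nullopt;
      // Everything strictly between the two halves goes to the other
      // destination and forms one contiguous interval.
      return std::make_pair(OtherMin + 1, OtherMax - 1);
    }

    int main() {
      // Condition known to be in [0, 6); cases {5, 1, 0} go to destination A.
      std::vector<uint64_t> CasesA = {5, 1, 0};
      auto Range = wrappedComplement(CasesA, /*KnownMin=*/0, /*KnownMax=*/5);
      assert(Range && Range->first == 2 && Range->second == 4);
      return 0;
    }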
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 56a3d6d..e434e73 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -8201,211 +8201,6 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
}
}
-/// Create and return a ResumePhi for \p WideIV, unless it is truncated. If the
-/// induction recipe is not canonical, creates a VPDerivedIVRecipe to compute
-/// the end value of the induction.
-static VPInstruction *addResumePhiRecipeForInduction(
- VPWidenInductionRecipe *WideIV, VPBuilder &VectorPHBuilder,
- VPBuilder &ScalarPHBuilder, VPTypeAnalysis &TypeInfo, VPValue *VectorTC) {
- auto *WideIntOrFp = dyn_cast<VPWidenIntOrFpInductionRecipe>(WideIV);
- // Truncated wide inductions resume from the last lane of their vector value
- // in the last vector iteration which is handled elsewhere.
- if (WideIntOrFp && WideIntOrFp->getTruncInst())
- return nullptr;
-
- VPValue *Start = WideIV->getStartValue();
- VPValue *Step = WideIV->getStepValue();
- const InductionDescriptor &ID = WideIV->getInductionDescriptor();
- VPValue *EndValue = VectorTC;
- if (!WideIntOrFp || !WideIntOrFp->isCanonical()) {
- EndValue = VectorPHBuilder.createDerivedIV(
- ID.getKind(), dyn_cast_or_null<FPMathOperator>(ID.getInductionBinOp()),
- Start, VectorTC, Step);
- }
-
- // EndValue is derived from the vector trip count (which has the same type as
- // the widest induction) and thus may be wider than the induction here.
- Type *ScalarTypeOfWideIV = TypeInfo.inferScalarType(WideIV);
- if (ScalarTypeOfWideIV != TypeInfo.inferScalarType(EndValue)) {
- EndValue = VectorPHBuilder.createScalarCast(Instruction::Trunc, EndValue,
- ScalarTypeOfWideIV,
- WideIV->getDebugLoc());
- }
-
- auto *ResumePhiRecipe = ScalarPHBuilder.createScalarPhi(
- {EndValue, Start}, WideIV->getDebugLoc(), "bc.resume.val");
- return ResumePhiRecipe;
-}
-
-/// Create resume phis in the scalar preheader for first-order recurrences,
-/// reductions and inductions, and update the VPIRInstructions wrapping the
-/// original phis in the scalar header. End values for inductions are added to
-/// \p IVEndValues.
-static void addScalarResumePhis(VPRecipeBuilder &Builder, VPlan &Plan,
- DenseMap<VPValue *, VPValue *> &IVEndValues) {
- VPTypeAnalysis TypeInfo(Plan);
- auto *ScalarPH = Plan.getScalarPreheader();
- auto *MiddleVPBB = cast<VPBasicBlock>(ScalarPH->getPredecessors()[0]);
- VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion();
- VPBuilder VectorPHBuilder(
- cast<VPBasicBlock>(VectorRegion->getSinglePredecessor()));
- VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->getFirstNonPhi());
- VPBuilder ScalarPHBuilder(ScalarPH);
- for (VPRecipeBase &ScalarPhiR : Plan.getScalarHeader()->phis()) {
- auto *ScalarPhiIRI = cast<VPIRPhi>(&ScalarPhiR);
-
- // TODO: Extract final value from induction recipe initially, optimize to
- // pre-computed end value together in optimizeInductionExitUsers.
- auto *VectorPhiR =
- cast<VPHeaderPHIRecipe>(Builder.getRecipe(&ScalarPhiIRI->getIRPhi()));
- if (auto *WideIVR = dyn_cast<VPWidenInductionRecipe>(VectorPhiR)) {
- if (VPInstruction *ResumePhi = addResumePhiRecipeForInduction(
- WideIVR, VectorPHBuilder, ScalarPHBuilder, TypeInfo,
- &Plan.getVectorTripCount())) {
- assert(isa<VPPhi>(ResumePhi) && "Expected a phi");
- IVEndValues[WideIVR] = ResumePhi->getOperand(0);
- ScalarPhiIRI->addOperand(ResumePhi);
- continue;
- }
- // TODO: Also handle truncated inductions here. Computing end-values
- // separately should be done as VPlan-to-VPlan optimization, after
- // legalizing all resume values to use the last lane from the loop.
- assert(cast<VPWidenIntOrFpInductionRecipe>(VectorPhiR)->getTruncInst() &&
- "should only skip truncated wide inductions");
- continue;
- }
-
- // The backedge value provides the value to resume coming out of a loop,
- // which for FORs is a vector whose last element needs to be extracted. The
- // start value provides the value if the loop is bypassed.
- bool IsFOR = isa<VPFirstOrderRecurrencePHIRecipe>(VectorPhiR);
- auto *ResumeFromVectorLoop = VectorPhiR->getBackedgeValue();
- assert(VectorRegion->getSingleSuccessor() == Plan.getMiddleBlock() &&
- "Cannot handle loops with uncountable early exits");
- if (IsFOR)
- ResumeFromVectorLoop = MiddleBuilder.createNaryOp(
- VPInstruction::ExtractLastElement, {ResumeFromVectorLoop}, {},
- "vector.recur.extract");
- StringRef Name = IsFOR ? "scalar.recur.init" : "bc.merge.rdx";
- auto *ResumePhiR = ScalarPHBuilder.createScalarPhi(
- {ResumeFromVectorLoop, VectorPhiR->getStartValue()}, {}, Name);
- ScalarPhiIRI->addOperand(ResumePhiR);
- }
-}
-
-/// Handle users in the exit block for first order reductions in the original
-/// exit block. The penultimate value of recurrences is fed to their LCSSA phi
-/// users in the original exit block using the VPIRInstruction wrapping to the
-/// LCSSA phi.
-static void addExitUsersForFirstOrderRecurrences(VPlan &Plan, VFRange &Range) {
- VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion();
- auto *ScalarPHVPBB = Plan.getScalarPreheader();
- auto *MiddleVPBB = Plan.getMiddleBlock();
- VPBuilder ScalarPHBuilder(ScalarPHVPBB);
- VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->getFirstNonPhi());
-
- auto IsScalableOne = [](ElementCount VF) -> bool {
- return VF == ElementCount::getScalable(1);
- };
-
- for (auto &HeaderPhi : VectorRegion->getEntryBasicBlock()->phis()) {
- auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&HeaderPhi);
- if (!FOR)
- continue;
-
- assert(VectorRegion->getSingleSuccessor() == Plan.getMiddleBlock() &&
- "Cannot handle loops with uncountable early exits");
-
- // This is the second phase of vectorizing first-order recurrences, creating
- // extract for users outside the loop. An overview of the transformation is
- // described below. Suppose we have the following loop with some use after
- // the loop of the last a[i-1],
- //
- // for (int i = 0; i < n; ++i) {
- // t = a[i - 1];
- // b[i] = a[i] - t;
- // }
- // use t;
- //
- // There is a first-order recurrence on "a". For this loop, the shorthand
- // scalar IR looks like:
- //
- // scalar.ph:
- // s.init = a[-1]
- // br scalar.body
- //
- // scalar.body:
- // i = phi [0, scalar.ph], [i+1, scalar.body]
- // s1 = phi [s.init, scalar.ph], [s2, scalar.body]
- // s2 = a[i]
- // b[i] = s2 - s1
- // br cond, scalar.body, exit.block
- //
- // exit.block:
- // use = lcssa.phi [s1, scalar.body]
- //
- // In this example, s1 is a recurrence because it's value depends on the
- // previous iteration. In the first phase of vectorization, we created a
- // VPFirstOrderRecurrencePHIRecipe v1 for s1. Now we create the extracts
- // for users in the scalar preheader and exit block.
- //
- // vector.ph:
- // v_init = vector(..., ..., ..., a[-1])
- // br vector.body
- //
- // vector.body
- // i = phi [0, vector.ph], [i+4, vector.body]
- // v1 = phi [v_init, vector.ph], [v2, vector.body]
- // v2 = a[i, i+1, i+2, i+3]
- // b[i] = v2 - v1
- // // Next, third phase will introduce v1' = splice(v1(3), v2(0, 1, 2))
- // b[i, i+1, i+2, i+3] = v2 - v1
- // br cond, vector.body, middle.block
- //
- // middle.block:
- // vector.recur.extract.for.phi = v2(2)
- // vector.recur.extract = v2(3)
- // br cond, scalar.ph, exit.block
- //
- // scalar.ph:
- // scalar.recur.init = phi [vector.recur.extract, middle.block],
- // [s.init, otherwise]
- // br scalar.body
- //
- // scalar.body:
- // i = phi [0, scalar.ph], [i+1, scalar.body]
- // s1 = phi [scalar.recur.init, scalar.ph], [s2, scalar.body]
- // s2 = a[i]
- // b[i] = s2 - s1
- // br cond, scalar.body, exit.block
- //
- // exit.block:
- // lo = lcssa.phi [s1, scalar.body],
- // [vector.recur.extract.for.phi, middle.block]
- //
- // Now update VPIRInstructions modeling LCSSA phis in the exit block.
- // Extract the penultimate value of the recurrence and use it as operand for
- // the VPIRInstruction modeling the phi.
- for (VPUser *U : FOR->users()) {
- using namespace llvm::VPlanPatternMatch;
- if (!match(U, m_ExtractLastElement(m_Specific(FOR))))
- continue;
- // For VF vscale x 1, if vscale = 1, we are unable to extract the
- // penultimate value of the recurrence. Instead we rely on the existing
- // extract of the last element from the result of
- // VPInstruction::FirstOrderRecurrenceSplice.
- // TODO: Consider vscale_range info and UF.
- if (LoopVectorizationPlanner::getDecisionAndClampRange(IsScalableOne,
- Range))
- return;
- VPValue *PenultimateElement = MiddleBuilder.createNaryOp(
- VPInstruction::ExtractPenultimateElement, {FOR->getBackedgeValue()},
- {}, "vector.recur.extract.for.phi");
- cast<VPInstruction>(U)->replaceAllUsesWith(PenultimateElement);
- }
- }
-}
-
VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
VPlanPtr Plan, VFRange &Range, LoopVersioning *LVer) {
@@ -8598,9 +8393,11 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
R->setOperand(1, WideIV->getStepValue());
}
- addExitUsersForFirstOrderRecurrences(*Plan, Range);
+ VPlanTransforms::runPass(
+ VPlanTransforms::addExitUsersForFirstOrderRecurrences, *Plan, Range);
DenseMap<VPValue *, VPValue *> IVEndValues;
- addScalarResumePhis(RecipeBuilder, *Plan, IVEndValues);
+ VPlanTransforms::runPass(VPlanTransforms::addScalarResumePhis, *Plan,
+ RecipeBuilder, IVEndValues);
// ---------------------------------------------------------------------------
// Transform initial VPlan: Apply previously taken decisions, in order, to
@@ -8711,7 +8508,8 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) {
DenseMap<VPValue *, VPValue *> IVEndValues;
// TODO: IVEndValues are not used yet in the native path, to optimize exit
// values.
- addScalarResumePhis(RecipeBuilder, *Plan, IVEndValues);
+ VPlanTransforms::runPass(VPlanTransforms::addScalarResumePhis, *Plan,
+ RecipeBuilder, IVEndValues);
assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid");
return Plan;
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index fedca65..91c3d42 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -10620,7 +10620,8 @@ class InstructionsCompatibilityAnalysis {
/// Checks if the opcode is supported as the main opcode for copyable
/// elements.
static bool isSupportedOpcode(const unsigned Opcode) {
- return Opcode == Instruction::Add || Opcode == Instruction::LShr;
+ return Opcode == Instruction::Add || Opcode == Instruction::LShr ||
+ Opcode == Instruction::Shl;
}
/// Identifies the best candidate value, which represents main opcode
@@ -10937,6 +10938,7 @@ public:
switch (MainOpcode) {
case Instruction::Add:
case Instruction::LShr:
+ case Instruction::Shl:
VectorCost = TTI.getArithmeticInstrCost(MainOpcode, VecTy, Kind);
break;
default:
@@ -22006,6 +22008,8 @@ bool BoUpSLP::collectValuesToDemote(
return all_of(E.Scalars, [&](Value *V) {
if (isa<PoisonValue>(V))
return true;
+ if (E.isCopyableElement(V))
+ return true;
auto *I = cast<Instruction>(V);
KnownBits AmtKnownBits = computeKnownBits(I->getOperand(1), *DL);
return AmtKnownBits.getMaxValue().ult(BitWidth);
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index ca63bf3..ebf833e 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -4198,3 +4198,202 @@ void VPlanTransforms::addBranchWeightToMiddleTerminator(
MDB.createBranchWeights({1, VectorStep - 1}, /*IsExpected=*/false);
MiddleTerm->addMetadata(LLVMContext::MD_prof, BranchWeights);
}
+
+/// Create and return a ResumePhi for \p WideIV, unless it is truncated. If the
+/// induction recipe is not canonical, creates a VPDerivedIVRecipe to compute
+/// the end value of the induction.
+static VPInstruction *addResumePhiRecipeForInduction(
+ VPWidenInductionRecipe *WideIV, VPBuilder &VectorPHBuilder,
+ VPBuilder &ScalarPHBuilder, VPTypeAnalysis &TypeInfo, VPValue *VectorTC) {
+ auto *WideIntOrFp = dyn_cast<VPWidenIntOrFpInductionRecipe>(WideIV);
+ // Truncated wide inductions resume from the last lane of their vector value
+ // in the last vector iteration which is handled elsewhere.
+ if (WideIntOrFp && WideIntOrFp->getTruncInst())
+ return nullptr;
+
+ VPValue *Start = WideIV->getStartValue();
+ VPValue *Step = WideIV->getStepValue();
+ const InductionDescriptor &ID = WideIV->getInductionDescriptor();
+ VPValue *EndValue = VectorTC;
+ if (!WideIntOrFp || !WideIntOrFp->isCanonical()) {
+ EndValue = VectorPHBuilder.createDerivedIV(
+ ID.getKind(), dyn_cast_or_null<FPMathOperator>(ID.getInductionBinOp()),
+ Start, VectorTC, Step);
+ }
+
+ // EndValue is derived from the vector trip count (which has the same type as
+ // the widest induction) and thus may be wider than the induction here.
+ Type *ScalarTypeOfWideIV = TypeInfo.inferScalarType(WideIV);
+ if (ScalarTypeOfWideIV != TypeInfo.inferScalarType(EndValue)) {
+ EndValue = VectorPHBuilder.createScalarCast(Instruction::Trunc, EndValue,
+ ScalarTypeOfWideIV,
+ WideIV->getDebugLoc());
+ }
+
+ auto *ResumePhiRecipe = ScalarPHBuilder.createScalarPhi(
+ {EndValue, Start}, WideIV->getDebugLoc(), "bc.resume.val");
+ return ResumePhiRecipe;
+}
+
+void VPlanTransforms::addScalarResumePhis(
+ VPlan &Plan, VPRecipeBuilder &Builder,
+ DenseMap<VPValue *, VPValue *> &IVEndValues) {
+ VPTypeAnalysis TypeInfo(Plan);
+ auto *ScalarPH = Plan.getScalarPreheader();
+ auto *MiddleVPBB = cast<VPBasicBlock>(ScalarPH->getPredecessors()[0]);
+ VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion();
+ VPBuilder VectorPHBuilder(
+ cast<VPBasicBlock>(VectorRegion->getSinglePredecessor()));
+ VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->getFirstNonPhi());
+ VPBuilder ScalarPHBuilder(ScalarPH);
+ for (VPRecipeBase &ScalarPhiR : Plan.getScalarHeader()->phis()) {
+ auto *ScalarPhiIRI = cast<VPIRPhi>(&ScalarPhiR);
+
+ // TODO: Extract final value from induction recipe initially, optimize to
+ // pre-computed end value together in optimizeInductionExitUsers.
+ auto *VectorPhiR =
+ cast<VPHeaderPHIRecipe>(Builder.getRecipe(&ScalarPhiIRI->getIRPhi()));
+ if (auto *WideIVR = dyn_cast<VPWidenInductionRecipe>(VectorPhiR)) {
+ if (VPInstruction *ResumePhi = addResumePhiRecipeForInduction(
+ WideIVR, VectorPHBuilder, ScalarPHBuilder, TypeInfo,
+ &Plan.getVectorTripCount())) {
+ assert(isa<VPPhi>(ResumePhi) && "Expected a phi");
+ IVEndValues[WideIVR] = ResumePhi->getOperand(0);
+ ScalarPhiIRI->addOperand(ResumePhi);
+ continue;
+ }
+ // TODO: Also handle truncated inductions here. Computing end-values
+ // separately should be done as VPlan-to-VPlan optimization, after
+ // legalizing all resume values to use the last lane from the loop.
+ assert(cast<VPWidenIntOrFpInductionRecipe>(VectorPhiR)->getTruncInst() &&
+ "should only skip truncated wide inductions");
+ continue;
+ }
+
+ // The backedge value provides the value to resume coming out of a loop,
+ // which for FORs is a vector whose last element needs to be extracted. The
+ // start value provides the value if the loop is bypassed.
+ bool IsFOR = isa<VPFirstOrderRecurrencePHIRecipe>(VectorPhiR);
+ auto *ResumeFromVectorLoop = VectorPhiR->getBackedgeValue();
+ assert(VectorRegion->getSingleSuccessor() == Plan.getMiddleBlock() &&
+ "Cannot handle loops with uncountable early exits");
+ if (IsFOR)
+ ResumeFromVectorLoop = MiddleBuilder.createNaryOp(
+ VPInstruction::ExtractLastElement, {ResumeFromVectorLoop}, {},
+ "vector.recur.extract");
+ StringRef Name = IsFOR ? "scalar.recur.init" : "bc.merge.rdx";
+ auto *ResumePhiR = ScalarPHBuilder.createScalarPhi(
+ {ResumeFromVectorLoop, VectorPhiR->getStartValue()}, {}, Name);
+ ScalarPhiIRI->addOperand(ResumePhiR);
+ }
+}
+
+void VPlanTransforms::addExitUsersForFirstOrderRecurrences(VPlan &Plan,
+ VFRange &Range) {
+ VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion();
+ auto *ScalarPHVPBB = Plan.getScalarPreheader();
+ auto *MiddleVPBB = Plan.getMiddleBlock();
+ VPBuilder ScalarPHBuilder(ScalarPHVPBB);
+ VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->getFirstNonPhi());
+
+ auto IsScalableOne = [](ElementCount VF) -> bool {
+ return VF == ElementCount::getScalable(1);
+ };
+
+ for (auto &HeaderPhi : VectorRegion->getEntryBasicBlock()->phis()) {
+ auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&HeaderPhi);
+ if (!FOR)
+ continue;
+
+ assert(VectorRegion->getSingleSuccessor() == Plan.getMiddleBlock() &&
+ "Cannot handle loops with uncountable early exits");
+
+    // This is the second phase of vectorizing first-order recurrences: creating
+    // extracts for users outside the loop. An overview of the transformation is
+ // described below. Suppose we have the following loop with some use after
+ // the loop of the last a[i-1],
+ //
+ // for (int i = 0; i < n; ++i) {
+ // t = a[i - 1];
+ // b[i] = a[i] - t;
+ // }
+ // use t;
+ //
+ // There is a first-order recurrence on "a". For this loop, the shorthand
+ // scalar IR looks like:
+ //
+ // scalar.ph:
+ // s.init = a[-1]
+ // br scalar.body
+ //
+ // scalar.body:
+ // i = phi [0, scalar.ph], [i+1, scalar.body]
+ // s1 = phi [s.init, scalar.ph], [s2, scalar.body]
+ // s2 = a[i]
+ // b[i] = s2 - s1
+ // br cond, scalar.body, exit.block
+ //
+ // exit.block:
+ // use = lcssa.phi [s1, scalar.body]
+ //
+    // In this example, s1 is a recurrence because its value depends on the
+ // previous iteration. In the first phase of vectorization, we created a
+ // VPFirstOrderRecurrencePHIRecipe v1 for s1. Now we create the extracts
+ // for users in the scalar preheader and exit block.
+ //
+ // vector.ph:
+ // v_init = vector(..., ..., ..., a[-1])
+ // br vector.body
+ //
+ // vector.body
+ // i = phi [0, vector.ph], [i+4, vector.body]
+ // v1 = phi [v_init, vector.ph], [v2, vector.body]
+ // v2 = a[i, i+1, i+2, i+3]
+ // b[i] = v2 - v1
+ // // Next, third phase will introduce v1' = splice(v1(3), v2(0, 1, 2))
+ // b[i, i+1, i+2, i+3] = v2 - v1
+ // br cond, vector.body, middle.block
+ //
+ // middle.block:
+ // vector.recur.extract.for.phi = v2(2)
+ // vector.recur.extract = v2(3)
+ // br cond, scalar.ph, exit.block
+ //
+ // scalar.ph:
+ // scalar.recur.init = phi [vector.recur.extract, middle.block],
+ // [s.init, otherwise]
+ // br scalar.body
+ //
+ // scalar.body:
+ // i = phi [0, scalar.ph], [i+1, scalar.body]
+ // s1 = phi [scalar.recur.init, scalar.ph], [s2, scalar.body]
+ // s2 = a[i]
+ // b[i] = s2 - s1
+ // br cond, scalar.body, exit.block
+ //
+ // exit.block:
+ // lo = lcssa.phi [s1, scalar.body],
+ // [vector.recur.extract.for.phi, middle.block]
+ //
+ // Now update VPIRInstructions modeling LCSSA phis in the exit block.
+ // Extract the penultimate value of the recurrence and use it as operand for
+ // the VPIRInstruction modeling the phi.
+ for (VPUser *U : FOR->users()) {
+ using namespace llvm::VPlanPatternMatch;
+ if (!match(U, m_ExtractLastElement(m_Specific(FOR))))
+ continue;
+ // For VF vscale x 1, if vscale = 1, we are unable to extract the
+ // penultimate value of the recurrence. Instead we rely on the existing
+ // extract of the last element from the result of
+ // VPInstruction::FirstOrderRecurrenceSplice.
+ // TODO: Consider vscale_range info and UF.
+ if (LoopVectorizationPlanner::getDecisionAndClampRange(IsScalableOne,
+ Range))
+ return;
+ VPValue *PenultimateElement = MiddleBuilder.createNaryOp(
+ VPInstruction::ExtractPenultimateElement, {FOR->getBackedgeValue()},
+ {}, "vector.recur.extract.for.phi");
+ cast<VPInstruction>(U)->replaceAllUsesWith(PenultimateElement);
+ }
+ }
+}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
index 2f00e51..5a8a2bb 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
@@ -363,6 +363,19 @@ struct VPlanTransforms {
static void
addBranchWeightToMiddleTerminator(VPlan &Plan, ElementCount VF,
std::optional<unsigned> VScaleForTuning);
+
+ /// Create resume phis in the scalar preheader for first-order recurrences,
+ /// reductions and inductions, and update the VPIRInstructions wrapping the
+ /// original phis in the scalar header. End values for inductions are added to
+ /// \p IVEndValues.
+ static void addScalarResumePhis(VPlan &Plan, VPRecipeBuilder &Builder,
+ DenseMap<VPValue *, VPValue *> &IVEndValues);
+
+  /// Handle users of first-order recurrences in the original exit block. The
+  /// penultimate value of each recurrence is fed to its LCSSA phi users in the
+  /// original exit block using the VPIRInstruction wrapping the corresponding
+  /// LCSSA phi.
+ static void addExitUsersForFirstOrderRecurrences(VPlan &Plan, VFRange &Range);
};
} // namespace llvm
diff --git a/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir b/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir
deleted file mode 100644
index 0298168..0000000
--- a/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir
+++ /dev/null
@@ -1,1009 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
-# RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-zpr-predicate-spills -run-pass=greedy %s -o - | FileCheck %s
-# RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-zpr-predicate-spills -start-before=greedy -stop-after=aarch64-expand-pseudo -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=EXPAND
---- |
- source_filename = "<stdin>"
- target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
- target triple = "aarch64--linux-gnu"
-
- define aarch64_sve_vector_pcs void @zpr_predicate_spill() #0 { entry: unreachable }
-
- define aarch64_sve_vector_pcs void @zpr_predicate_spill__save_restore_nzcv() #0 { entry: unreachable }
-
- define aarch64_sve_vector_pcs void @zpr_predicate_spill__save_restore_nzcv__scavenge_csr_gpr() #0 { entry: unreachable }
-
- define aarch64_sve_vector_pcs void @zpr_predicate_spill__spill_zpr() #0 { entry: unreachable }
-
- define aarch64_sve_vector_pcs void @zpr_predicate_spill_above_p7() #0 { entry: unreachable }
-
- define aarch64_sve_vector_pcs void @zpr_predicate_spill_p4_saved() #0 { entry: unreachable }
-
- attributes #0 = {nounwind "target-features"="+sme,+sve" "aarch64_pstate_sm_compatible"}
-...
----
-name: zpr_predicate_spill
-tracksRegLiveness: true
-stack:
-liveins:
- - { reg: '$p0' }
-body: |
- bb.0.entry:
- liveins: $p0
-
- ; CHECK-LABEL: name: zpr_predicate_spill
- ; CHECK: stack:
- ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 16, alignment: 16,
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register:
- ; CHECK: liveins: $p0
- ; CHECK-NEXT: {{ $}}
- ;
- ; CHECK-NEXT: SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p0, %stack.0, 0 :: (store (s128) into %stack.0)
- ;
- ; CHECK-NEXT: $p0 = IMPLICIT_DEF
- ; CHECK-NEXT: $p1 = IMPLICIT_DEF
- ; CHECK-NEXT: $p2 = IMPLICIT_DEF
- ; CHECK-NEXT: $p3 = IMPLICIT_DEF
- ; CHECK-NEXT: $p4 = IMPLICIT_DEF
- ; CHECK-NEXT: $p5 = IMPLICIT_DEF
- ; CHECK-NEXT: $p6 = IMPLICIT_DEF
- ; CHECK-NEXT: $p7 = IMPLICIT_DEF
- ; CHECK-NEXT: $p8 = IMPLICIT_DEF
- ; CHECK-NEXT: $p9 = IMPLICIT_DEF
- ; CHECK-NEXT: $p10 = IMPLICIT_DEF
- ; CHECK-NEXT: $p11 = IMPLICIT_DEF
- ; CHECK-NEXT: $p12 = IMPLICIT_DEF
- ; CHECK-NEXT: $p13 = IMPLICIT_DEF
- ; CHECK-NEXT: $p14 = IMPLICIT_DEF
- ; CHECK-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: $p0 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0 :: (load (s128) from %stack.0)
- ;
- ; CHECK-NEXT: RET_ReallyLR implicit $p0
-
- ; EXPAND-LABEL: name: zpr_predicate_spill
- ; EXPAND: liveins: $p0, $fp, $p15, $p14, $p13, $p12, $p11, $p10, $p9, $p8, $p7, $p6, $p5, $p4
- ; EXPAND-NEXT: {{ $}}
- ;
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
- ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.14)
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -12, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p15, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.13)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p14, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.12)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p13, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 2 :: (store (s128) into %stack.11)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p12, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 3 :: (store (s128) into %stack.10)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p11, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 4 :: (store (s128) into %stack.9)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p10, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 5 :: (store (s128) into %stack.8)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p9, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 6 :: (store (s128) into %stack.7)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 7 :: (store (s128) into %stack.6)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p7, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 8 :: (store (s128) into %stack.5)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p6, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 9 :: (store (s128) into %stack.4)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p5, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 10 :: (store (s128) into %stack.3)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 11 :: (store (s128) into %stack.2)
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg
- ;
- ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p0, 1, 0
- ; EXPAND-NEXT: $x8 = ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: STR_ZXI $z0, $x8, 0 :: (store (s128) into %stack.0)
- ;
- ; EXPAND-NEXT: $p0 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p1 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p2 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p3 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p4 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p5 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p6 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p7 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p8 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p9 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p10 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p11 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p12 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p13 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p14 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $z0 = LDR_ZXI killed $x8, 0 :: (load (s128) from %stack.0)
- ; EXPAND-NEXT: $p1 = frame-destroy PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p0 = CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ;
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.13)
- ; EXPAND-NEXT: $p15 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.12)
- ; EXPAND-NEXT: $p14 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.11)
- ; EXPAND-NEXT: $p13 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.10)
- ; EXPAND-NEXT: $p12 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 4 :: (load (s128) from %stack.9)
- ; EXPAND-NEXT: $p11 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 5 :: (load (s128) from %stack.8)
- ; EXPAND-NEXT: $p10 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 6 :: (load (s128) from %stack.7)
- ; EXPAND-NEXT: $p9 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 7 :: (load (s128) from %stack.6)
- ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 8 :: (load (s128) from %stack.5)
- ; EXPAND-NEXT: $p7 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 9 :: (load (s128) from %stack.4)
- ; EXPAND-NEXT: $p6 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 10 :: (load (s128) from %stack.3)
- ; EXPAND-NEXT: $p5 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 11 :: (load (s128) from %stack.2)
- ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 12, implicit $vg
- ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.14)
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
- ; EXPAND-NEXT: RET undef $lr, implicit $p0
- %1:ppr = COPY $p0
-
- $p0 = IMPLICIT_DEF
- $p1 = IMPLICIT_DEF
- $p2 = IMPLICIT_DEF
- $p3 = IMPLICIT_DEF
- $p4 = IMPLICIT_DEF
- $p5 = IMPLICIT_DEF
- $p6 = IMPLICIT_DEF
- $p7 = IMPLICIT_DEF
- $p8 = IMPLICIT_DEF
- $p9 = IMPLICIT_DEF
- $p10 = IMPLICIT_DEF
- $p11 = IMPLICIT_DEF
- $p12 = IMPLICIT_DEF
- $p13 = IMPLICIT_DEF
- $p14 = IMPLICIT_DEF
- $p15 = IMPLICIT_DEF
-
- $p0 = COPY %1
-
- RET_ReallyLR implicit $p0
-...
----
-name: zpr_predicate_spill__save_restore_nzcv
-tracksRegLiveness: true
-stack:
-liveins:
- - { reg: '$p0' }
-body: |
- bb.0.entry:
- liveins: $p0
-
- ; CHECK-LABEL: name: zpr_predicate_spill__save_restore_nzcv
- ; CHECK: stack:
- ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 16, alignment: 16,
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register:
- ; CHECK: liveins: $p0
- ; CHECK-NEXT: {{ $}}
- ;
- ; CHECK-NEXT: $nzcv = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p0, %stack.0, 0 :: (store (s128) into %stack.0)
- ;
- ; CHECK-NEXT: $p0 = IMPLICIT_DEF
- ; CHECK-NEXT: $p1 = IMPLICIT_DEF
- ; CHECK-NEXT: $p2 = IMPLICIT_DEF
- ; CHECK-NEXT: $p3 = IMPLICIT_DEF
- ; CHECK-NEXT: $p4 = IMPLICIT_DEF
- ; CHECK-NEXT: $p5 = IMPLICIT_DEF
- ; CHECK-NEXT: $p6 = IMPLICIT_DEF
- ; CHECK-NEXT: $p7 = IMPLICIT_DEF
- ; CHECK-NEXT: $p8 = IMPLICIT_DEF
- ; CHECK-NEXT: $p9 = IMPLICIT_DEF
- ; CHECK-NEXT: $p10 = IMPLICIT_DEF
- ; CHECK-NEXT: $p11 = IMPLICIT_DEF
- ; CHECK-NEXT: $p12 = IMPLICIT_DEF
- ; CHECK-NEXT: $p13 = IMPLICIT_DEF
- ; CHECK-NEXT: $p14 = IMPLICIT_DEF
- ; CHECK-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: $p0 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0 :: (load (s128) from %stack.0)
- ;
- ; CHECK-NEXT: FAKE_USE implicit $nzcv
- ;
- ; CHECK-NEXT: RET_ReallyLR implicit $p0
-
- ; EXPAND-LABEL: name: zpr_predicate_spill__save_restore_nzcv
- ; EXPAND: liveins: $p0, $fp, $p15, $p14, $p13, $p12, $p11, $p10, $p9, $p8, $p7, $p6, $p5, $p4
- ; EXPAND-NEXT: {{ $}}
- ;
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
- ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.14)
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -12, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p15, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.13)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p14, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.12)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p13, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 2 :: (store (s128) into %stack.11)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p12, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 3 :: (store (s128) into %stack.10)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p11, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 4 :: (store (s128) into %stack.9)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p10, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 5 :: (store (s128) into %stack.8)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p9, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 6 :: (store (s128) into %stack.7)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 7 :: (store (s128) into %stack.6)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p7, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 8 :: (store (s128) into %stack.5)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p6, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 9 :: (store (s128) into %stack.4)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p5, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 10 :: (store (s128) into %stack.3)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 11 :: (store (s128) into %stack.2)
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg
- ;
- ; EXPAND-NEXT: $nzcv = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p0, 1, 0
- ; EXPAND-NEXT: $x8 = ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: STR_ZXI $z0, $x8, 0 :: (store (s128) into %stack.0)
- ;
- ; EXPAND-NEXT: $p0 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p1 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p2 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p3 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p4 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p5 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p6 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p7 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p8 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p9 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p10 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p11 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p12 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p13 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p14 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $z0 = LDR_ZXI killed $x8, 0 :: (load (s128) from %stack.0)
- ; EXPAND-NEXT: $fp = MRS 55824, implicit-def $nzcv, implicit $nzcv
- ; EXPAND-NEXT: $p0 = PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p0 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: MSR 55824, $fp, implicit-def $nzcv
- ;
- ; EXPAND-NEXT: FAKE_USE implicit $nzcv
- ;
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.13)
- ; EXPAND-NEXT: $p1 = frame-destroy PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p15 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.12)
- ; EXPAND-NEXT: $p14 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.11)
- ; EXPAND-NEXT: $p13 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.10)
- ; EXPAND-NEXT: $p12 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 4 :: (load (s128) from %stack.9)
- ; EXPAND-NEXT: $p11 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 5 :: (load (s128) from %stack.8)
- ; EXPAND-NEXT: $p10 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 6 :: (load (s128) from %stack.7)
- ; EXPAND-NEXT: $p9 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 7 :: (load (s128) from %stack.6)
- ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 8 :: (load (s128) from %stack.5)
- ; EXPAND-NEXT: $p7 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 9 :: (load (s128) from %stack.4)
- ; EXPAND-NEXT: $p6 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 10 :: (load (s128) from %stack.3)
- ; EXPAND-NEXT: $p5 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 11 :: (load (s128) from %stack.2)
- ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 12, implicit $vg
- ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.14)
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
- ; EXPAND-NEXT: RET undef $lr, implicit $p0
- $nzcv = IMPLICIT_DEF
-
- %1:ppr = COPY $p0
-
- $p0 = IMPLICIT_DEF
- $p1 = IMPLICIT_DEF
- $p2 = IMPLICIT_DEF
- $p3 = IMPLICIT_DEF
- $p4 = IMPLICIT_DEF
- $p5 = IMPLICIT_DEF
- $p6 = IMPLICIT_DEF
- $p7 = IMPLICIT_DEF
- $p8 = IMPLICIT_DEF
- $p9 = IMPLICIT_DEF
- $p10 = IMPLICIT_DEF
- $p11 = IMPLICIT_DEF
- $p12 = IMPLICIT_DEF
- $p13 = IMPLICIT_DEF
- $p14 = IMPLICIT_DEF
- $p15 = IMPLICIT_DEF
-
- $p0 = COPY %1
-
- FAKE_USE implicit $nzcv
-
- RET_ReallyLR implicit $p0
-...
----
-name: zpr_predicate_spill__save_restore_nzcv__scavenge_csr_gpr
-tracksRegLiveness: true
-stack:
-liveins:
- - { reg: '$p0' }
- - { reg: '$x0' }
- - { reg: '$x1' }
- - { reg: '$x2' }
- - { reg: '$x3' }
- - { reg: '$x4' }
- - { reg: '$x5' }
- - { reg: '$x6' }
- - { reg: '$x7' }
-body: |
- bb.0.entry:
- liveins: $p0, $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
-
- ; CHECK-LABEL: name: zpr_predicate_spill__save_restore_nzcv__scavenge_csr_gpr
- ; CHECK: stack:
- ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 16, alignment: 16,
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register:
- ; CHECK: liveins: $p0, $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
- ; CHECK-NEXT: {{ $}}
- ;
- ; CHECK-NEXT: $nzcv = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: $x8 = IMPLICIT_DEF
- ; CHECK-NEXT: $x9 = IMPLICIT_DEF
- ; CHECK-NEXT: $x10 = IMPLICIT_DEF
- ; CHECK-NEXT: $x11 = IMPLICIT_DEF
- ; CHECK-NEXT: $x12 = IMPLICIT_DEF
- ; CHECK-NEXT: $x13 = IMPLICIT_DEF
- ; CHECK-NEXT: $x14 = IMPLICIT_DEF
- ; CHECK-NEXT: $x15 = IMPLICIT_DEF
- ; CHECK-NEXT: $x16 = IMPLICIT_DEF
- ; CHECK-NEXT: $x17 = IMPLICIT_DEF
- ; CHECK-NEXT: $x18 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p0, %stack.0, 0 :: (store (s128) into %stack.0)
- ;
- ; CHECK-NEXT: $p0 = IMPLICIT_DEF
- ; CHECK-NEXT: $p1 = IMPLICIT_DEF
- ; CHECK-NEXT: $p2 = IMPLICIT_DEF
- ; CHECK-NEXT: $p3 = IMPLICIT_DEF
- ; CHECK-NEXT: $p4 = IMPLICIT_DEF
- ; CHECK-NEXT: $p5 = IMPLICIT_DEF
- ; CHECK-NEXT: $p6 = IMPLICIT_DEF
- ; CHECK-NEXT: $p7 = IMPLICIT_DEF
- ; CHECK-NEXT: $p8 = IMPLICIT_DEF
- ; CHECK-NEXT: $p9 = IMPLICIT_DEF
- ; CHECK-NEXT: $p10 = IMPLICIT_DEF
- ; CHECK-NEXT: $p11 = IMPLICIT_DEF
- ; CHECK-NEXT: $p12 = IMPLICIT_DEF
- ; CHECK-NEXT: $p13 = IMPLICIT_DEF
- ; CHECK-NEXT: $p14 = IMPLICIT_DEF
- ; CHECK-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: $p0 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0 :: (load (s128) from %stack.0)
- ;
- ; CHECK-NEXT: FAKE_USE implicit $nzcv, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18
- ;
- ; CHECK-NEXT: RET_ReallyLR implicit $p0, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit $x4, implicit $x5, implicit $x6, implicit $x7, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18
-
- ; EXPAND-LABEL: name: zpr_predicate_spill__save_restore_nzcv__scavenge_csr_gpr
- ; EXPAND: liveins: $p0, $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7, $fp, $p15, $p14, $p13, $p12, $p11, $p10, $p9, $p8, $p7, $p6, $p5, $p4
- ; EXPAND-NEXT: {{ $}}
- ;
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
- ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.14)
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -12, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p15, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.13)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p14, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.12)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p13, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 2 :: (store (s128) into %stack.11)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p12, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 3 :: (store (s128) into %stack.10)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p11, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 4 :: (store (s128) into %stack.9)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p10, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 5 :: (store (s128) into %stack.8)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p9, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 6 :: (store (s128) into %stack.7)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 7 :: (store (s128) into %stack.6)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p7, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 8 :: (store (s128) into %stack.5)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p6, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 9 :: (store (s128) into %stack.4)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p5, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 10 :: (store (s128) into %stack.3)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 11 :: (store (s128) into %stack.2)
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg
- ;
- ; EXPAND-NEXT: $nzcv = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $x8 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x9 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x10 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x11 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x12 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x13 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x14 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x15 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x16 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x17 = IMPLICIT_DEF
- ; EXPAND-NEXT: $x18 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p0, 1, 0
- ; EXPAND-NEXT: $fp = ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: STR_ZXI $z0, $fp, 0 :: (store (s128) into %stack.0)
- ;
- ; EXPAND-NEXT: $p0 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p1 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p2 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p3 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p4 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p5 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p6 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p7 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p8 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p9 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p10 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p11 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p12 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p13 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p14 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $z0 = LDR_ZXI killed $fp, 0 :: (load (s128) from %stack.0)
- ; EXPAND-NEXT: $fp = MRS 55824, implicit-def $nzcv, implicit $nzcv
- ; EXPAND-NEXT: $p0 = PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p0 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: MSR 55824, $fp, implicit-def $nzcv
- ;
- ; EXPAND-NEXT: FAKE_USE implicit $nzcv, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18
- ;
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.13)
- ; EXPAND-NEXT: $p1 = frame-destroy PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p15 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.12)
- ; EXPAND-NEXT: $p14 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.11)
- ; EXPAND-NEXT: $p13 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.10)
- ; EXPAND-NEXT: $p12 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 4 :: (load (s128) from %stack.9)
- ; EXPAND-NEXT: $p11 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 5 :: (load (s128) from %stack.8)
- ; EXPAND-NEXT: $p10 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 6 :: (load (s128) from %stack.7)
- ; EXPAND-NEXT: $p9 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 7 :: (load (s128) from %stack.6)
- ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 8 :: (load (s128) from %stack.5)
- ; EXPAND-NEXT: $p7 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 9 :: (load (s128) from %stack.4)
- ; EXPAND-NEXT: $p6 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 10 :: (load (s128) from %stack.3)
- ; EXPAND-NEXT: $p5 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 11 :: (load (s128) from %stack.2)
- ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p1, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 12, implicit $vg
- ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.14)
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
- ; EXPAND-NEXT: RET undef $lr, implicit $p0, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit $x4, implicit $x5, implicit $x6, implicit $x7, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18
- $nzcv = IMPLICIT_DEF
- $x8 = IMPLICIT_DEF
- $x9 = IMPLICIT_DEF
- $x10 = IMPLICIT_DEF
- $x11 = IMPLICIT_DEF
- $x12 = IMPLICIT_DEF
- $x13 = IMPLICIT_DEF
- $x14 = IMPLICIT_DEF
- $x15 = IMPLICIT_DEF
- $x16 = IMPLICIT_DEF
- $x17 = IMPLICIT_DEF
- $x18 = IMPLICIT_DEF
-
- %1:ppr = COPY $p0
-
- $p0 = IMPLICIT_DEF
- $p1 = IMPLICIT_DEF
- $p2 = IMPLICIT_DEF
- $p3 = IMPLICIT_DEF
- $p4 = IMPLICIT_DEF
- $p5 = IMPLICIT_DEF
- $p6 = IMPLICIT_DEF
- $p7 = IMPLICIT_DEF
- $p8 = IMPLICIT_DEF
- $p9 = IMPLICIT_DEF
- $p10 = IMPLICIT_DEF
- $p11 = IMPLICIT_DEF
- $p12 = IMPLICIT_DEF
- $p13 = IMPLICIT_DEF
- $p14 = IMPLICIT_DEF
- $p15 = IMPLICIT_DEF
-
- $p0 = COPY %1
-
- FAKE_USE implicit $nzcv, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18
-
- RET_ReallyLR implicit $p0, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit $x4, implicit $x5, implicit $x6, implicit $x7, implicit $x8, implicit $x9, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x18
-...
----
-name: zpr_predicate_spill__spill_zpr
-tracksRegLiveness: true
-stack:
-liveins:
- - { reg: '$p0' }
- - { reg: '$z0' }
- - { reg: '$z1' }
- - { reg: '$z2' }
- - { reg: '$z3' }
- - { reg: '$z4' }
- - { reg: '$z5' }
- - { reg: '$z6' }
- - { reg: '$z7' }
-body: |
- bb.0.entry:
- liveins: $p0, $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
-
- ; CHECK-LABEL: name: zpr_predicate_spill__spill_zpr
- ; CHECK: stack:
- ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 16, alignment: 16,
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register:
- ; CHECK: liveins: $p0, $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
- ; CHECK-NEXT: {{ $}}
- ;
- ; CHECK-NEXT: $z16 = IMPLICIT_DEF
- ; CHECK-NEXT: $z17 = IMPLICIT_DEF
- ; CHECK-NEXT: $z18 = IMPLICIT_DEF
- ; CHECK-NEXT: $z19 = IMPLICIT_DEF
- ; CHECK-NEXT: $z20 = IMPLICIT_DEF
- ; CHECK-NEXT: $z21 = IMPLICIT_DEF
- ; CHECK-NEXT: $z22 = IMPLICIT_DEF
- ; CHECK-NEXT: $z23 = IMPLICIT_DEF
- ; CHECK-NEXT: $z24 = IMPLICIT_DEF
- ; CHECK-NEXT: $z25 = IMPLICIT_DEF
- ; CHECK-NEXT: $z26 = IMPLICIT_DEF
- ; CHECK-NEXT: $z27 = IMPLICIT_DEF
- ; CHECK-NEXT: $z28 = IMPLICIT_DEF
- ; CHECK-NEXT: $z29 = IMPLICIT_DEF
- ; CHECK-NEXT: $z30 = IMPLICIT_DEF
- ; CHECK-NEXT: $z31 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p0, %stack.0, 0 :: (store (s128) into %stack.0)
- ;
- ; CHECK-NEXT: $p0 = IMPLICIT_DEF
- ; CHECK-NEXT: $p1 = IMPLICIT_DEF
- ; CHECK-NEXT: $p2 = IMPLICIT_DEF
- ; CHECK-NEXT: $p3 = IMPLICIT_DEF
- ; CHECK-NEXT: $p4 = IMPLICIT_DEF
- ; CHECK-NEXT: $p5 = IMPLICIT_DEF
- ; CHECK-NEXT: $p6 = IMPLICIT_DEF
- ; CHECK-NEXT: $p7 = IMPLICIT_DEF
- ; CHECK-NEXT: $p8 = IMPLICIT_DEF
- ; CHECK-NEXT: $p9 = IMPLICIT_DEF
- ; CHECK-NEXT: $p10 = IMPLICIT_DEF
- ; CHECK-NEXT: $p11 = IMPLICIT_DEF
- ; CHECK-NEXT: $p12 = IMPLICIT_DEF
- ; CHECK-NEXT: $p13 = IMPLICIT_DEF
- ; CHECK-NEXT: $p14 = IMPLICIT_DEF
- ; CHECK-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: $p0 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0 :: (load (s128) from %stack.0)
- ;
- ; CHECK-NEXT: FAKE_USE implicit $z16, implicit $z17, implicit $z18, implicit $z19, implicit $z20, implicit $z21, implicit $z22, implicit $z23, implicit $z24, implicit $z25, implicit $z26, implicit $z27, implicit $z28, implicit $z29, implicit $z30, implicit $z31
- ;
- ; CHECK-NEXT: RET_ReallyLR implicit $p0, implicit $z0, implicit $z1, implicit $z2, implicit $z3, implicit $z4, implicit $z5, implicit $z6, implicit $z7
-
- ; EXPAND-LABEL: name: zpr_predicate_spill__spill_zpr
- ; EXPAND: liveins: $p0, $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7, $fp, $p15, $p14, $p13, $p12, $p11, $p10, $p9, $p8, $p7, $p6, $p5, $p4, $z23, $z22, $z21, $z20, $z19, $z18, $z17, $z16
- ; EXPAND-NEXT: {{ $}}
- ;
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
- ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.22)
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -20, implicit $vg
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p15, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 0 :: (store (s128) into %stack.21)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p14, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 1 :: (store (s128) into %stack.20)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p13, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 2 :: (store (s128) into %stack.19)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p12, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 3 :: (store (s128) into %stack.18)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p11, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 4 :: (store (s128) into %stack.17)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p10, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 5 :: (store (s128) into %stack.16)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p9, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 6 :: (store (s128) into %stack.15)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p8, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 7 :: (store (s128) into %stack.14)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p7, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 8 :: (store (s128) into %stack.13)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p6, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 9 :: (store (s128) into %stack.12)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p5, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 10 :: (store (s128) into %stack.11)
- ; EXPAND-NEXT: $z24 = frame-setup CPY_ZPzI_B killed $p4, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z24, $sp, 11 :: (store (s128) into %stack.10)
- ; EXPAND-NEXT: frame-setup STR_ZXI killed $z23, $sp, 12 :: (store (s128) into %stack.9)
- ; EXPAND-NEXT: frame-setup STR_ZXI killed $z22, $sp, 13 :: (store (s128) into %stack.8)
- ; EXPAND-NEXT: frame-setup STR_ZXI killed $z21, $sp, 14 :: (store (s128) into %stack.7)
- ; EXPAND-NEXT: frame-setup STR_ZXI killed $z20, $sp, 15 :: (store (s128) into %stack.6)
- ; EXPAND-NEXT: frame-setup STR_ZXI killed $z19, $sp, 16 :: (store (s128) into %stack.5)
- ; EXPAND-NEXT: frame-setup STR_ZXI killed $z18, $sp, 17 :: (store (s128) into %stack.4)
- ; EXPAND-NEXT: frame-setup STR_ZXI killed $z17, $sp, 18 :: (store (s128) into %stack.3)
- ; EXPAND-NEXT: frame-setup STR_ZXI killed $z16, $sp, 19 :: (store (s128) into %stack.2)
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg
- ;
- ; EXPAND-NEXT: $z16 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z17 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z18 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z19 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z20 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z21 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z22 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z23 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z24 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z25 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z26 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z27 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z28 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z29 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z30 = IMPLICIT_DEF
- ; EXPAND-NEXT: $z31 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $x8 = ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: STR_ZXI $z0, $x8, 0 :: (store (s128) into %stack.24)
- ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p0, 1, 0
- ; EXPAND-NEXT: STR_ZXI $z0, $x8, 1 :: (store (s128) into %stack.0)
- ; EXPAND-NEXT: $z0 = LDR_ZXI $x8, 0 :: (load (s128) from %stack.24)
- ;
- ; EXPAND-NEXT: $p0 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p1 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p2 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p3 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p4 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p5 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p6 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p7 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p8 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p9 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p10 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p11 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p12 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p13 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p14 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: STR_ZXI $z0, $x8, 0 :: (store (s128) into %stack.24)
- ; EXPAND-NEXT: $z0 = LDR_ZXI $x8, 1 :: (load (s128) from %stack.0)
- ; EXPAND-NEXT: $p0 = PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p0 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = LDR_ZXI killed $x8, 0 :: (load (s128) from %stack.24)
- ;
- ; EXPAND-NEXT: FAKE_USE implicit $z16, implicit $z17, implicit $z18, implicit $z19, implicit $z20, implicit $z21, implicit $z22, implicit $z23, implicit $z24, implicit $z25, implicit $z26, implicit $z27, implicit $z28, implicit $z29, implicit $z30, implicit $z31
- ;
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg
- ; EXPAND-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 12 :: (load (s128) from %stack.9)
- ; EXPAND-NEXT: $z22 = frame-destroy LDR_ZXI $sp, 13 :: (load (s128) from %stack.8)
- ; EXPAND-NEXT: $z21 = frame-destroy LDR_ZXI $sp, 14 :: (load (s128) from %stack.7)
- ; EXPAND-NEXT: $z20 = frame-destroy LDR_ZXI $sp, 15 :: (load (s128) from %stack.6)
- ; EXPAND-NEXT: $z19 = frame-destroy LDR_ZXI $sp, 16 :: (load (s128) from %stack.5)
- ; EXPAND-NEXT: $z18 = frame-destroy LDR_ZXI $sp, 17 :: (load (s128) from %stack.4)
- ; EXPAND-NEXT: $z17 = frame-destroy LDR_ZXI $sp, 18 :: (load (s128) from %stack.3)
- ; EXPAND-NEXT: $z16 = frame-destroy LDR_ZXI $sp, 19 :: (load (s128) from %stack.2)
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.21)
- ; EXPAND-NEXT: $p1 = frame-destroy PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p15 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.20)
- ; EXPAND-NEXT: $p14 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.19)
- ; EXPAND-NEXT: $p13 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.18)
- ; EXPAND-NEXT: $p12 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 4 :: (load (s128) from %stack.17)
- ; EXPAND-NEXT: $p11 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 5 :: (load (s128) from %stack.16)
- ; EXPAND-NEXT: $p10 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 6 :: (load (s128) from %stack.15)
- ; EXPAND-NEXT: $p9 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 7 :: (load (s128) from %stack.14)
- ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 8 :: (load (s128) from %stack.13)
- ; EXPAND-NEXT: $p7 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 9 :: (load (s128) from %stack.12)
- ; EXPAND-NEXT: $p6 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 10 :: (load (s128) from %stack.11)
- ; EXPAND-NEXT: $p5 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z24 = frame-destroy LDR_ZXI $sp, 11 :: (load (s128) from %stack.10)
- ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p1, $z24, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 20, implicit $vg
- ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.22)
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
- ; EXPAND-NEXT: RET undef $lr, implicit $p0, implicit $z0, implicit $z1, implicit $z2, implicit $z3, implicit $z4, implicit $z5, implicit $z6, implicit $z7
- $z16 = IMPLICIT_DEF
- $z17 = IMPLICIT_DEF
- $z18 = IMPLICIT_DEF
- $z19 = IMPLICIT_DEF
- $z20 = IMPLICIT_DEF
- $z21 = IMPLICIT_DEF
- $z22 = IMPLICIT_DEF
- $z23 = IMPLICIT_DEF
- $z24 = IMPLICIT_DEF
- $z25 = IMPLICIT_DEF
- $z26 = IMPLICIT_DEF
- $z27 = IMPLICIT_DEF
- $z28 = IMPLICIT_DEF
- $z29 = IMPLICIT_DEF
- $z30 = IMPLICIT_DEF
- $z31 = IMPLICIT_DEF
-
- %1:ppr = COPY $p0
-
- $p0 = IMPLICIT_DEF
- $p1 = IMPLICIT_DEF
- $p2 = IMPLICIT_DEF
- $p3 = IMPLICIT_DEF
- $p4 = IMPLICIT_DEF
- $p5 = IMPLICIT_DEF
- $p6 = IMPLICIT_DEF
- $p7 = IMPLICIT_DEF
- $p8 = IMPLICIT_DEF
- $p9 = IMPLICIT_DEF
- $p10 = IMPLICIT_DEF
- $p11 = IMPLICIT_DEF
- $p12 = IMPLICIT_DEF
- $p13 = IMPLICIT_DEF
- $p14 = IMPLICIT_DEF
- $p15 = IMPLICIT_DEF
-
- $p0 = COPY %1
-
- FAKE_USE implicit $z16, implicit $z17, implicit $z18, implicit $z19, implicit $z20, implicit $z21, implicit $z22, implicit $z23, implicit $z24, implicit $z25, implicit $z26, implicit $z27, implicit $z28, implicit $z29, implicit $z30, implicit $z31
-
- RET_ReallyLR implicit $p0, implicit $z0, implicit $z1, implicit $z2, implicit $z3, implicit $z4, implicit $z5, implicit $z6, implicit $z7
-...
----
-name: zpr_predicate_spill_above_p7
-tracksRegLiveness: true
-stack:
-liveins:
- - { reg: '$p0' }
- - { reg: '$p1' }
- - { reg: '$p2' }
- - { reg: '$p3' }
-body: |
- bb.0.entry:
- liveins: $p0, $p1, $p2, $p3
-
- ; CHECK-LABEL: name: zpr_predicate_spill_above_p7
- ; CHECK: stack:
- ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 16, alignment: 16,
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register:
- ; CHECK: liveins: $p0, $p1, $p2, $p3
- ; CHECK-NEXT: {{ $}}
- ;
- ; CHECK-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: SPILL_PPR_TO_ZPR_SLOT_PSEUDO $p15, %stack.0, 0 :: (store (s128) into %stack.0)
- ;
- ; CHECK-NEXT: $p0 = IMPLICIT_DEF
- ; CHECK-NEXT: $p1 = IMPLICIT_DEF
- ; CHECK-NEXT: $p2 = IMPLICIT_DEF
- ; CHECK-NEXT: $p3 = IMPLICIT_DEF
- ; CHECK-NEXT: $p4 = IMPLICIT_DEF
- ; CHECK-NEXT: $p5 = IMPLICIT_DEF
- ; CHECK-NEXT: $p6 = IMPLICIT_DEF
- ; CHECK-NEXT: $p7 = IMPLICIT_DEF
- ; CHECK-NEXT: $p8 = IMPLICIT_DEF
- ; CHECK-NEXT: $p9 = IMPLICIT_DEF
- ; CHECK-NEXT: $p10 = IMPLICIT_DEF
- ; CHECK-NEXT: $p11 = IMPLICIT_DEF
- ; CHECK-NEXT: $p12 = IMPLICIT_DEF
- ; CHECK-NEXT: $p13 = IMPLICIT_DEF
- ; CHECK-NEXT: $p14 = IMPLICIT_DEF
- ; CHECK-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: $p15 = FILL_PPR_FROM_ZPR_SLOT_PSEUDO %stack.0, 0 :: (load (s128) from %stack.0)
- ;
- ; CHECK-NEXT: FAKE_USE implicit $p4, implicit $p5, implicit $p6, implicit $p7
- ;
- ; CHECK-NEXT: RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3
-
- ; EXPAND-LABEL: name: zpr_predicate_spill_above_p7
- ; EXPAND: liveins: $p0, $p1, $p2, $p3, $fp, $p15, $p14, $p13, $p12, $p11, $p10, $p9, $p8, $p7, $p6, $p5, $p4
- ; EXPAND-NEXT: {{ $}}
- ;
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
- ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.14)
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -12, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p15, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.13)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p14, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.12)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p13, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 2 :: (store (s128) into %stack.11)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p12, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 3 :: (store (s128) into %stack.10)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p11, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 4 :: (store (s128) into %stack.9)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p10, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 5 :: (store (s128) into %stack.8)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p9, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 6 :: (store (s128) into %stack.7)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 7 :: (store (s128) into %stack.6)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p7, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 8 :: (store (s128) into %stack.5)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p6, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 9 :: (store (s128) into %stack.4)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p5, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 10 :: (store (s128) into %stack.3)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 11 :: (store (s128) into %stack.2)
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg
- ;
- ; EXPAND-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p15, 1, 0
- ; EXPAND-NEXT: $x8 = ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: STR_ZXI $z0, $x8, 1 :: (store (s128) into %stack.0)
- ;
- ; EXPAND-NEXT: $p0 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p1 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p2 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p3 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p4 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p5 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p6 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p7 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p8 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p9 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p10 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p11 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p12 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p13 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p14 = IMPLICIT_DEF
- ; EXPAND-NEXT: $p15 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $z0 = CPY_ZPzI_B $p0, 1, 0
- ; EXPAND-NEXT: STR_ZXI $z0, $x8, 0 :: (store (s128) into %stack.16)
- ; EXPAND-NEXT: $z0 = LDR_ZXI $x8, 1 :: (load (s128) from %stack.0)
- ; EXPAND-NEXT: $p0 = PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p15 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = LDR_ZXI killed $x8, 0 :: (load (s128) from %stack.16)
- ; EXPAND-NEXT: $p0 = PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p0 = CMPNE_PPzZI_B $p0, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ;
- ; EXPAND-NEXT: FAKE_USE implicit $p4, implicit $p5, implicit $p6, implicit $p7
- ;
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.13)
- ; EXPAND-NEXT: $p4 = frame-destroy PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p15 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.12)
- ; EXPAND-NEXT: $p14 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.11)
- ; EXPAND-NEXT: $p13 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 3 :: (load (s128) from %stack.10)
- ; EXPAND-NEXT: $p12 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 4 :: (load (s128) from %stack.9)
- ; EXPAND-NEXT: $p11 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 5 :: (load (s128) from %stack.8)
- ; EXPAND-NEXT: $p10 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 6 :: (load (s128) from %stack.7)
- ; EXPAND-NEXT: $p9 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 7 :: (load (s128) from %stack.6)
- ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 8 :: (load (s128) from %stack.5)
- ; EXPAND-NEXT: $p7 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 9 :: (load (s128) from %stack.4)
- ; EXPAND-NEXT: $p6 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 10 :: (load (s128) from %stack.3)
- ; EXPAND-NEXT: $p5 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 11 :: (load (s128) from %stack.2)
- ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 12, implicit $vg
- ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.14)
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
- ; EXPAND-NEXT: RET undef $lr, implicit $p0, implicit $p1, implicit $p2, implicit $p3
- $p15 = IMPLICIT_DEF
- %1:ppr = COPY $p15
-
- $p0 = IMPLICIT_DEF
- $p1 = IMPLICIT_DEF
- $p2 = IMPLICIT_DEF
- $p3 = IMPLICIT_DEF
- $p4 = IMPLICIT_DEF
- $p5 = IMPLICIT_DEF
- $p6 = IMPLICIT_DEF
- $p7 = IMPLICIT_DEF
- $p8 = IMPLICIT_DEF
- $p9 = IMPLICIT_DEF
- $p10 = IMPLICIT_DEF
- $p11 = IMPLICIT_DEF
- $p12 = IMPLICIT_DEF
- $p13 = IMPLICIT_DEF
- $p14 = IMPLICIT_DEF
- $p15 = IMPLICIT_DEF
-
- $p15 = COPY %1
-
- FAKE_USE implicit $p4, implicit $p5, implicit $p6, implicit $p7
-
- RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3
-...
----
-name: zpr_predicate_spill_p4_saved
-tracksRegLiveness: true
-stack:
-liveins:
- - { reg: '$p0' }
- - { reg: '$p1' }
- - { reg: '$p2' }
- - { reg: '$p3' }
-body: |
- bb.0.entry:
- liveins: $p0, $p1, $p2, $p3
-
- ; CHECK-LABEL: name: zpr_predicate_spill_p4_saved
- ; CHECK: liveins: $p0, $p1, $p2, $p3
- ; CHECK-NEXT: {{ $}}
- ;
- ; CHECK-NEXT: $p8 = IMPLICIT_DEF
- ;
- ; CHECK-NEXT: RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3
-
- ; EXPAND-LABEL: name: zpr_predicate_spill_p4_saved
- ; EXPAND: liveins: $p0, $p1, $p2, $p3, $fp, $p8, $p4
- ; EXPAND-NEXT: {{ $}}
- ; EXPAND-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
- ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.1)
- ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.0)
- ;
- ; EXPAND-NEXT: $p8 = IMPLICIT_DEF
- ;
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.1)
- ; EXPAND-NEXT: $p4 = frame-destroy PTRUE_B 31, implicit $vg
- ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.0)
- ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg
- ; EXPAND-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
- ; EXPAND-NEXT: RET undef $lr, implicit $p0, implicit $p1, implicit $p2, implicit $p3
-
-  ; If we spill a register above p8, p4 must also be saved, so we can guarantee
-  ; there will be a register (in the range p0-p7) available for the cmpne reload.
- $p8 = IMPLICIT_DEF
-
- RET_ReallyLR implicit $p0, implicit $p1, implicit $p2, implicit $p3
-...
diff --git a/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll b/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll
index 01e3d3a..c0a2943 100644
--- a/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll
+++ b/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll
@@ -1,7 +1,5 @@
; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -pass-remarks-analysis=sme -aarch64-stack-hazard-remark-size=64 -o /dev/null < %s 2>&1 | FileCheck %s --check-prefixes=CHECK
; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -pass-remarks-analysis=sme -aarch64-stack-hazard-size=1024 -o /dev/null < %s 2>&1 | FileCheck %s --check-prefixes=CHECK-PADDING
-; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -pass-remarks-analysis=sme -aarch64-enable-zpr-predicate-spills -aarch64-stack-hazard-remark-size=64 -o /dev/null < %s 2>&1 | FileCheck %s --check-prefixes=CHECK-ZPR-PRED-SPILLS
-; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -pass-remarks-analysis=sme -aarch64-enable-zpr-predicate-spills -aarch64-stack-hazard-size=1024 -o /dev/null < %s 2>&1 | FileCheck %s --check-prefixes=CHECK-ZPR-PRED-SPILLS-WITH-PADDING
; Don't emit remarks for non-streaming functions.
define float @csr_x20_stackargs_notsc(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h, float %i) {
@@ -69,16 +67,11 @@ entry:
; SVE calling conventions
; Padding is placed between predicate and fpr/zpr register spills, so only emit remarks when hazard padding is off.
-; Note: The -aarch64-enable-zpr-predicate-spills option is deprecated (and will be removed soon).
define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) #2 {
; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at [SP-64-258 * vscale] is too close to FPR stack object at [SP-64-256 * vscale]
; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64]
; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'svecc_call':
-; CHECK-ZPR-PRED-SPILLS-NOT: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at {{.*}} is too close to FPR stack object
-; CHECK-ZPR-PRED-SPILLS: <unknown>:0:0: stack hazard in 'svecc_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64]
-; CHECK-ZPR-PRED-SPILLS-WITH-PADDING-NOT: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at {{.*}} is too close to FPR stack object
-; CHECK-ZPR-PRED-SPILLS-WITH-PADDING-NOT: <unknown>:0:0: stack hazard in 'svecc_call': FPR stack object at {{.*}} is too close to GPR stack object
entry:
tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
%call = call ptr @memset(ptr noundef nonnull %P1, i32 noundef 45, i32 noundef 37)
@@ -89,10 +82,6 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': PPR stack object at [SP-64-258 * vscale] is too close to FPR stack object at [SP-64-256 * vscale]
; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64]
; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call':
-; CHECK-ZPR-PRED-SPILLS-NOT: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at {{.*}} is too close to FPR stack object
-; CHECK-ZPR-PRED-SPILLS: <unknown>:0:0: stack hazard in 'svecc_alloca_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64]
-; CHECK-ZPR-PRED-SPILLS-WITH-PADDING-NOT: <unknown>:0:0: stack hazard in 'svecc_alloca_call': PPR stack object at {{.*}} is too close to FPR stack object
-; CHECK-ZPR-PRED-SPILLS-WITH-PADDING-NOT: <unknown>:0:0: stack hazard in 'svecc_alloca_call': FPR stack object at {{.*}} is too close to GPR stack object
entry:
tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
%0 = alloca [37 x i8], align 16
diff --git a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll
index 6b7d704..ede470b 100644
--- a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll
+++ b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0-callable.ll
@@ -1,13 +1,11 @@
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 < %s | FileCheck --check-prefixes=CHECK,GFX11 %s
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 < %s | FileCheck --check-prefixes=CHECK,GFX12 %s
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=+dynamic-vgpr < %s | FileCheck --check-prefixes=CHECK,GFX12,DVGPR %s
; CHECK: .amdgpu_pal_metadata
; CHECK-NEXT: ---
; CHECK-NEXT: amdpal.pipelines:
; CHECK-NEXT: - .api: Vulkan
; CHECK-NEXT: .compute_registers:
-; DVGPR-NEXT: .dynamic_vgpr_en: true
; CHECK-NEXT: .tg_size_en: true
; CHECK-NEXT: .tgid_x_en: false
; CHECK-NEXT: .tgid_y_en: false
diff --git a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll
index 5c0c366..5325499 100644
--- a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll
+++ b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.0.ll
@@ -1,17 +1,14 @@
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 <%s | FileCheck %s --check-prefixes=CHECK,GFX11,NODVGPR
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 <%s | FileCheck %s --check-prefixes=CHECK,NODVGPR
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=+dynamic-vgpr <%s | FileCheck %s --check-prefixes=CHECK,DVGPR
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 <%s | FileCheck %s --check-prefixes=CHECK,GFX11
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 <%s | FileCheck %s --check-prefixes=CHECK
; CHECK-LABEL: {{^}}_amdgpu_cs_main:
-; NODVGPR: ; TotalNumSgprs: 4
-; DVGPR: ; TotalNumSgprs: 34
+; CHECK: ; TotalNumSgprs: 4
; CHECK: ; NumVgprs: 2
; CHECK: .amdgpu_pal_metadata
; CHECK-NEXT: ---
; CHECK-NEXT: amdpal.pipelines:
; CHECK-NEXT: - .api: Vulkan
; CHECK-NEXT: .compute_registers:
-; DVGPR-NEXT: .dynamic_vgpr_en: true
; CHECK-NEXT: .tg_size_en: true
; CHECK-NEXT: .tgid_x_en: false
; CHECK-NEXT: .tgid_y_en: false
@@ -57,7 +54,6 @@
; CHECK-NEXT: .cs:
; CHECK-NEXT: .checksum_value: 0x9444d7d0
; CHECK-NEXT: .debug_mode: false
-; DVGPR-NEXT: .dynamic_vgpr_saved_count: 0x70
; CHECK-NEXT: .entry_point: _amdgpu_cs_main
; CHECK-NEXT: .entry_point_symbol: _amdgpu_cs_main
; CHECK-NEXT: .excp_en: 0
@@ -69,8 +65,7 @@
; CHECK-NEXT: .mem_ordered: true
; CHECK-NEXT: .scratch_en: false
; CHECK-NEXT: .scratch_memory_size: 0
-; NODVGPR-NEXT: .sgpr_count: 0x4
-; DVGPR-NEXT: .sgpr_count: 0x22
+; CHECK-NEXT: .sgpr_count: 0x4
; CHECK-NEXT: .sgpr_limit: 0x6a
; CHECK-NEXT: .threadgroup_dimensions:
; CHECK-NEXT: - 0x1
diff --git a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6-dvgpr.ll b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6-dvgpr.ll
new file mode 100644
index 0000000..e598b0c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6-dvgpr.ll
@@ -0,0 +1,204 @@
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 <%s | FileCheck %s --check-prefixes=CHECK
+
+; CHECK-LABEL: {{^}}_amdgpu_cs_main:
+; CHECK: ; TotalNumSgprs: 34
+; CHECK: ; NumVgprs: 2
+; CHECK: .amdgpu_pal_metadata
+; CHECK-NEXT: ---
+; CHECK-NEXT: amdpal.pipelines:
+; CHECK-NEXT: - .api: Vulkan
+; CHECK-NEXT: .compute_registers:
+; CHECK-NEXT: .dynamic_vgpr_en: true
+; CHECK-NEXT: .tg_size_en: true
+; CHECK-NEXT: .tgid_x_en: false
+; CHECK-NEXT: .tgid_y_en: false
+; CHECK-NEXT: .tgid_z_en: false
+; CHECK-NEXT: .tidig_comp_cnt: 0x1
+; CHECK-NEXT: .graphics_registers:
+; CHECK-NEXT: .ps_extra_lds_size: 0
+; CHECK-NEXT: .spi_ps_input_addr:
+; CHECK-NEXT: .ancillary_ena: false
+; CHECK-NEXT: .front_face_ena: true
+; CHECK-NEXT: .line_stipple_tex_ena: false
+; CHECK-NEXT: .linear_center_ena: true
+; CHECK-NEXT: .linear_centroid_ena: true
+; CHECK-NEXT: .linear_sample_ena: true
+; CHECK-NEXT: .persp_center_ena: true
+; CHECK-NEXT: .persp_centroid_ena: true
+; CHECK-NEXT: .persp_pull_model_ena: false
+; CHECK-NEXT: .persp_sample_ena: true
+; CHECK-NEXT: .pos_fixed_pt_ena: true
+; CHECK-NEXT: .pos_w_float_ena: false
+; CHECK-NEXT: .pos_x_float_ena: false
+; CHECK-NEXT: .pos_y_float_ena: false
+; CHECK-NEXT: .pos_z_float_ena: false
+; CHECK-NEXT: .sample_coverage_ena: false
+; CHECK-NEXT: .spi_ps_input_ena:
+; CHECK-NEXT: .ancillary_ena: false
+; CHECK-NEXT: .front_face_ena: false
+; CHECK-NEXT: .line_stipple_tex_ena: false
+; CHECK-NEXT: .linear_center_ena: false
+; CHECK-NEXT: .linear_centroid_ena: false
+; CHECK-NEXT: .linear_sample_ena: false
+; CHECK-NEXT: .persp_center_ena: false
+; CHECK-NEXT: .persp_centroid_ena: false
+; CHECK-NEXT: .persp_pull_model_ena: false
+; CHECK-NEXT: .persp_sample_ena: true
+; CHECK-NEXT: .pos_fixed_pt_ena: false
+; CHECK-NEXT: .pos_w_float_ena: false
+; CHECK-NEXT: .pos_x_float_ena: false
+; CHECK-NEXT: .pos_y_float_ena: false
+; CHECK-NEXT: .pos_z_float_ena: false
+; CHECK-NEXT: .sample_coverage_ena: false
+; CHECK-NEXT: .hardware_stages:
+; CHECK-NEXT: .cs:
+; CHECK-NEXT: .checksum_value: 0x9444d7d0
+; CHECK-NEXT: .debug_mode: false
+; CHECK-NEXT: .dynamic_vgpr_saved_count: 0x70
+; CHECK-NOT: .entry_point: _amdgpu_cs_main
+; CHECK-NEXT: .entry_point_symbol: _amdgpu_cs_main
+; CHECK-NEXT: .excp_en: 0
+; CHECK-NEXT: .float_mode: 0xc0
+; CHECK-NEXT: .forward_progress: true
+; GFX11-NEXT: .ieee_mode: false
+; CHECK-NEXT: .image_op: false
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .mem_ordered: true
+; CHECK-NEXT: .scratch_en: false
+; CHECK-NEXT: .scratch_memory_size: 0
+; CHECK-NEXT: .sgpr_count: 0x22
+; CHECK-NEXT: .sgpr_limit: 0x6a
+; CHECK-NEXT: .threadgroup_dimensions:
+; CHECK-NEXT: - 0x1
+; CHECK-NEXT: - 0x400
+; CHECK-NEXT: - 0x1
+; CHECK-NEXT: .trap_present: false
+; CHECK-NEXT: .user_data_reg_map:
+; CHECK-NEXT: - 0x10000000
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: - 0xffffffff
+; CHECK-NEXT: .user_sgprs: 0x3
+; CHECK-NEXT: .vgpr_count: 0x2
+; CHECK-NEXT: .vgpr_limit: 0x100
+; CHECK-NEXT: .wavefront_size: 0x40
+; CHECK-NEXT: .wgp_mode: false
+; CHECK-NEXT: .gs:
+; CHECK-NEXT: .debug_mode: false
+; CHECK-NOT: .entry_point: _amdgpu_gs_main
+; CHECK-NEXT: .entry_point_symbol: gs_shader
+; CHECK-NEXT: .forward_progress: true
+; GFX11-NEXT: .ieee_mode: false
+; CHECK-NEXT: .lds_size: 0x200
+; CHECK-NEXT: .mem_ordered: true
+; CHECK-NEXT: .scratch_en: false
+; CHECK-NEXT: .scratch_memory_size: 0
+; CHECK-NEXT: .sgpr_count: 0x1
+; CHECK-NEXT: .vgpr_count: 0x1
+; CHECK-NEXT: .wgp_mode: true
+; CHECK-NEXT: .hs:
+; CHECK-NEXT: .debug_mode: false
+; CHECK-NOT: .entry_point: _amdgpu_hs_main
+; CHECK-NEXT: .entry_point_symbol: hs_shader
+; CHECK-NEXT: .forward_progress: true
+; GFX11-NEXT: .ieee_mode: false
+; CHECK-NEXT: .lds_size: 0x1000
+; CHECK-NEXT: .mem_ordered: true
+; CHECK-NEXT: .scratch_en: false
+; CHECK-NEXT: .scratch_memory_size: 0
+; CHECK-NEXT: .sgpr_count: 0x1
+; CHECK-NEXT: .vgpr_count: 0x1
+; CHECK-NEXT: .wgp_mode: true
+; CHECK-NEXT: .ps:
+; CHECK-NEXT: .debug_mode: false
+; CHECK-NOT: .entry_point: _amdgpu_ps_main
+; CHECK-NEXT: .entry_point_symbol: ps_shader
+; CHECK-NEXT: .forward_progress: true
+; GFX11-NEXT: .ieee_mode: false
+; CHECK-NEXT: .lds_size: 0
+; CHECK-NEXT: .mem_ordered: true
+; CHECK-NEXT: .scratch_en: false
+; CHECK-NEXT: .scratch_memory_size: 0
+; CHECK-NEXT: .sgpr_count: 0x1
+; CHECK-NEXT: .vgpr_count: 0x1
+; CHECK-NEXT: .wgp_mode: true
+; CHECK: .registers: {}
+; CHECK:amdpal.version:
+; CHECK-NEXT: - 0x3
+; CHECK-NEXT: - 0x6
+; CHECK-NEXT:...
+; CHECK-NEXT: .end_amdgpu_pal_metadata
+
+define dllexport amdgpu_cs void @_amdgpu_cs_main(i32 inreg %arg1, i32 %arg2) #0 !lgc.shaderstage !1 {
+.entry:
+ %i = call i64 @llvm.amdgcn.s.getpc()
+ %i1 = and i64 %i, -4294967296
+ %i2 = zext i32 %arg1 to i64
+ %i3 = or i64 %i1, %i2
+ %i4 = inttoptr i64 %i3 to ptr addrspace(4)
+ %i5 = and i32 %arg2, 1023
+ %i6 = lshr i32 %arg2, 10
+ %i7 = and i32 %i6, 1023
+ %i8 = add nuw nsw i32 %i7, %i5
+ %i9 = load <4 x i32>, ptr addrspace(4) %i4, align 16
+ %.idx = shl nuw nsw i32 %i8, 2
+ call void @llvm.amdgcn.raw.buffer.store.i32(i32 1, <4 x i32> %i9, i32 %.idx, i32 0, i32 0)
+ ret void
+}
+
+define dllexport amdgpu_ps void @ps_shader() #1 {
+ ret void
+}
+
+@LDS.GS = external addrspace(3) global [1 x i32], align 4
+
+define dllexport amdgpu_gs void @gs_shader() {
+ %ptr = getelementptr i32, ptr addrspace(3) @LDS.GS, i32 0
+ store i32 0, ptr addrspace(3) %ptr, align 4
+ ret void
+}
+
+@LDS.HS = external addrspace(3) global [1024 x i32], align 4
+
+define dllexport amdgpu_hs void @hs_shader() {
+ %ptr = getelementptr i32, ptr addrspace(3) @LDS.HS, i32 0
+ store i32 0, ptr addrspace(3) %ptr, align 4
+ ret void
+}
+
+!amdgpu.pal.metadata.msgpack = !{!0}
+
+attributes #0 = { nounwind memory(readwrite) "target-features"=",+wavefrontsize64,+cumode" "amdgpu-dynamic-vgpr-block-size"="16" }
+
+attributes #1 = { nounwind memory(readwrite) "InitialPSInputAddr"="36983" "amdgpu-dynamic-vgpr-block-size"="16" }
+
+!0 = !{!"\82\B0amdpal.pipelines\91\8A\A4.api\A6Vulkan\B2.compute_registers\85\AB.tg_size_en\C3\AA.tgid_x_en\C2\AA.tgid_y_en\C2\AA.tgid_z_en\C2\AF.tidig_comp_cnt\01\B0.hardware_stages\81\A3.cs\8C\AF.checksum_value\CE\94D\D7\D0\AB.debug_mode\00\AB.float_mode\CC\C0\A9.image_op\C2\AC.mem_ordered\C3\AB.sgpr_limitj\B7.threadgroup_dimensions\93\01\CD\04\00\01\AD.trap_present\00\B2.user_data_reg_map\DC\00 \CE\10\00\00\00\CE\FF\FF\FF\FF\00\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\CE\FF\FF\FF\FF\AB.user_sgprs\03\AB.vgpr_limit\CD\01\00\AF.wavefront_size@\B7.internal_pipeline_hash\92\CF\E7\10k\A6:\A6%\F7\CF\B2\1F\1A\D4{\DA\E1T\AA.registers\80\A8.shaders\81\A8.compute\82\B0.api_shader_hash\92\CF\E9Zn7}\1E\B9\E7\00\B1.hardware_mapping\91\A3.cs\B0.spill_threshold\CE\FF\FF\FF\FF\A5.type\A2Cs\B0.user_data_limit\01\AF.xgl_cache_info\82\B3.128_bit_cache_hash\92\CF\B4X\B8\11[\A4\88P\CF\A0;\B0\AF\FF\B4\BE\C0\AD.llpc_version\A461.1\AEamdpal.version\92\03\06"}
+!1 = !{i32 7}
diff --git a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6.ll b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6.ll
index 830872a..d2f26e8 100644
--- a/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6.ll
+++ b/llvm/test/CodeGen/AMDGPU/pal-metadata-3.6.ll
@@ -1,17 +1,14 @@
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 <%s | FileCheck %s --check-prefixes=CHECK,GFX11,NODVGPR
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 <%s | FileCheck %s --check-prefixes=CHECK,NODVGPR
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=+dynamic-vgpr <%s | FileCheck %s --check-prefixes=CHECK,DVGPR
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1100 <%s | FileCheck %s --check-prefixes=CHECK,GFX11
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 <%s | FileCheck %s --check-prefixes=CHECK
; CHECK-LABEL: {{^}}_amdgpu_cs_main:
-; NODVGPR: ; TotalNumSgprs: 4
-; DVGPR: ; TotalNumSgprs: 34
+; CHECK: ; TotalNumSgprs: 4
; CHECK: ; NumVgprs: 2
; CHECK: .amdgpu_pal_metadata
; CHECK-NEXT: ---
; CHECK-NEXT: amdpal.pipelines:
; CHECK-NEXT: - .api: Vulkan
; CHECK-NEXT: .compute_registers:
-; DVGPR-NEXT: .dynamic_vgpr_en: true
; CHECK-NEXT: .tg_size_en: true
; CHECK-NEXT: .tgid_x_en: false
; CHECK-NEXT: .tgid_y_en: false
@@ -57,7 +54,6 @@
; CHECK-NEXT: .cs:
; CHECK-NEXT: .checksum_value: 0x9444d7d0
; CHECK-NEXT: .debug_mode: false
-; DVGPR-NEXT: .dynamic_vgpr_saved_count: 0x70
; CHECK-NOT: .entry_point: _amdgpu_cs_main
; CHECK-NEXT: .entry_point_symbol: _amdgpu_cs_main
; CHECK-NEXT: .excp_en: 0
@@ -69,8 +65,7 @@
; CHECK-NEXT: .mem_ordered: true
; CHECK-NEXT: .scratch_en: false
; CHECK-NEXT: .scratch_memory_size: 0
-; NODVGPR-NEXT: .sgpr_count: 0x4
-; DVGPR-NEXT: .sgpr_count: 0x22
+; CHECK-NEXT: .sgpr_count: 0x4
; CHECK-NEXT: .sgpr_limit: 0x6a
; CHECK-NEXT: .threadgroup_dimensions:
; CHECK-NEXT: - 0x1
diff --git a/llvm/test/CodeGen/NVPTX/convert-sm103a.ll b/llvm/test/CodeGen/NVPTX/convert-sm103a.ll
new file mode 100644
index 0000000..54b4dd8
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/convert-sm103a.ll
@@ -0,0 +1,297 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_100a -mattr=+ptx87 | FileCheck %s
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_103a -mattr=+ptx87 | FileCheck %s
+; RUN: %if ptxas-sm_100a && ptxas-isa-8.7 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_100a -mattr=+ptx87 | %ptxas-verify -arch=sm_100a %}
+; RUN: %if ptxas-sm_103a && ptxas-isa-8.7 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_103a -mattr=+ptx87 | %ptxas-verify -arch=sm_103a %}
+
+; F16X2 conversions
+
+define <2 x half> @cvt_rs_f16x2_f32(float %f1, float %f2, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_f16x2_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_f16x2_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_f16x2_f32_param_1];
+; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_f16x2_f32_param_2];
+; CHECK-NEXT: cvt.rs.f16x2.f32 %r4, %r1, %r2, %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-NEXT: ret;
+ %val = call <2 x half> @llvm.nvvm.ff2f16x2.rs(float %f1, float %f2, i32 %rbits)
+ ret <2 x half> %val
+}
+
+define <2 x half> @cvt_rs_relu_f16x2_f32(float %f1, float %f2, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_f16x2_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_relu_f16x2_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_relu_f16x2_f32_param_1];
+; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_relu_f16x2_f32_param_2];
+; CHECK-NEXT: cvt.rs.relu.f16x2.f32 %r4, %r1, %r2, %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-NEXT: ret;
+ %val = call <2 x half> @llvm.nvvm.ff2f16x2.rs.relu(float %f1, float %f2, i32 %rbits)
+ ret <2 x half> %val
+}
+
+define <2 x half> @cvt_rs_sf_f16x2_f32(float %f1, float %f2, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_sf_f16x2_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_sf_f16x2_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_sf_f16x2_f32_param_1];
+; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_sf_f16x2_f32_param_2];
+; CHECK-NEXT: cvt.rs.satfinite.f16x2.f32 %r4, %r1, %r2, %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-NEXT: ret;
+ %val = call <2 x half> @llvm.nvvm.ff2f16x2.rs.satfinite(float %f1, float %f2, i32 %rbits)
+ ret <2 x half> %val
+}
+
+define <2 x half> @cvt_rs_relu_sf_f16x2_f32(float %f1, float %f2, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_sf_f16x2_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_relu_sf_f16x2_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_relu_sf_f16x2_f32_param_1];
+; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_relu_sf_f16x2_f32_param_2];
+; CHECK-NEXT: cvt.rs.relu.satfinite.f16x2.f32 %r4, %r1, %r2, %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-NEXT: ret;
+ %val = call <2 x half> @llvm.nvvm.ff2f16x2.rs.relu.satfinite(float %f1, float %f2, i32 %rbits)
+ ret <2 x half> %val
+}
+
+; BF16X2 conversions
+
+define <2 x bfloat> @cvt_rs_bf16x2_f32(float %f1, float %f2, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_bf16x2_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_bf16x2_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_bf16x2_f32_param_1];
+; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_bf16x2_f32_param_2];
+; CHECK-NEXT: cvt.rs.bf16x2.f32 %r4, %r1, %r2, %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-NEXT: ret;
+ %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs(float %f1, float %f2, i32 %rbits)
+ ret <2 x bfloat> %val
+}
+
+define <2 x bfloat> @cvt_rs_relu_bf16x2_f32(float %f1, float %f2, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_bf16x2_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_relu_bf16x2_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_relu_bf16x2_f32_param_1];
+; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_relu_bf16x2_f32_param_2];
+; CHECK-NEXT: cvt.rs.relu.bf16x2.f32 %r4, %r1, %r2, %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-NEXT: ret;
+ %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.relu(float %f1, float %f2, i32 %rbits)
+ ret <2 x bfloat> %val
+}
+
+define <2 x bfloat> @cvt_rs_sf_bf16x2_f32(float %f1, float %f2, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_sf_bf16x2_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_sf_bf16x2_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_sf_bf16x2_f32_param_1];
+; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_sf_bf16x2_f32_param_2];
+; CHECK-NEXT: cvt.rs.satfinite.bf16x2.f32 %r4, %r1, %r2, %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-NEXT: ret;
+ %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.satfinite(float %f1, float %f2, i32 %rbits)
+ ret <2 x bfloat> %val
+}
+
+define <2 x bfloat> @cvt_rs_relu_sf_bf16x2_f32(float %f1, float %f2, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_sf_bf16x2_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [cvt_rs_relu_sf_bf16x2_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r2, [cvt_rs_relu_sf_bf16x2_f32_param_1];
+; CHECK-NEXT: ld.param.b32 %r3, [cvt_rs_relu_sf_bf16x2_f32_param_2];
+; CHECK-NEXT: cvt.rs.relu.satfinite.bf16x2.f32 %r4, %r1, %r2, %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
+; CHECK-NEXT: ret;
+ %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.relu.satfinite(float %f1, float %f2, i32 %rbits)
+ ret <2 x bfloat> %val
+}
+
+; F8X4 conversions
+
+define <4 x i8> @cvt_rs_sf_e4m3x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_sf_e4m3x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_sf_e4m3x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_sf_e4m3x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.satfinite.e4m3x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call <4 x i8> @llvm.nvvm.f32x4.to.e4m3x4.rs.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret <4 x i8> %val
+}
+
+define <4 x i8> @cvt_rs_relu_sf_e4m3x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_sf_e4m3x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_relu_sf_e4m3x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_relu_sf_e4m3x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.relu.satfinite.e4m3x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call <4 x i8> @llvm.nvvm.f32x4.to.e4m3x4.rs.relu.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret <4 x i8> %val
+}
+
+define <4 x i8> @cvt_rs_sf_e5m2x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_sf_e5m2x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_sf_e5m2x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_sf_e5m2x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.satfinite.e5m2x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call <4 x i8> @llvm.nvvm.f32x4.to.e5m2x4.rs.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret <4 x i8> %val
+}
+
+define <4 x i8> @cvt_rs_relu_sf_e5m2x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_sf_e5m2x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_relu_sf_e5m2x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_relu_sf_e5m2x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.relu.satfinite.e5m2x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call <4 x i8> @llvm.nvvm.f32x4.to.e5m2x4.rs.relu.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret <4 x i8> %val
+}
+
+; F6X4 conversions
+
+define <4 x i8> @cvt_rs_sf_e2m3x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_sf_e2m3x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_sf_e2m3x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_sf_e2m3x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.satfinite.e2m3x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call <4 x i8> @llvm.nvvm.f32x4.to.e2m3x4.rs.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret <4 x i8> %val
+}
+
+define <4 x i8> @cvt_rs_relu_sf_e2m3x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_sf_e2m3x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_relu_sf_e2m3x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_relu_sf_e2m3x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.relu.satfinite.e2m3x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call <4 x i8> @llvm.nvvm.f32x4.to.e2m3x4.rs.relu.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret <4 x i8> %val
+}
+
+define <4 x i8> @cvt_rs_sf_e3m2x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_sf_e3m2x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_sf_e3m2x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_sf_e3m2x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.satfinite.e3m2x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call <4 x i8> @llvm.nvvm.f32x4.to.e3m2x4.rs.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret <4 x i8> %val
+}
+
+define <4 x i8> @cvt_rs_relu_sf_e3m2x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_sf_e3m2x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_relu_sf_e3m2x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_relu_sf_e3m2x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.relu.satfinite.e3m2x4.f32 %r6, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call <4 x i8> @llvm.nvvm.f32x4.to.e3m2x4.rs.relu.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret <4 x i8> %val
+}
+
+; F4X4 conversions
+
+define i16 @cvt_rs_sf_e2m1x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_sf_e2m1x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_sf_e2m1x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_sf_e2m1x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.satfinite.e2m1x4.f32 %rs1, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: cvt.u32.u16 %r6, %rs1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call i16 @llvm.nvvm.f32x4.to.e2m1x4.rs.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret i16 %val
+}
+
+define i16 @cvt_rs_relu_sf_e2m1x4_f32(<4 x float> %fvec, i32 %rbits) {
+; CHECK-LABEL: cvt_rs_relu_sf_e2m1x4_f32(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [cvt_rs_relu_sf_e2m1x4_f32_param_0];
+; CHECK-NEXT: ld.param.b32 %r5, [cvt_rs_relu_sf_e2m1x4_f32_param_1];
+; CHECK-NEXT: cvt.rs.relu.satfinite.e2m1x4.f32 %rs1, {%r1, %r2, %r3, %r4}, %r5;
+; CHECK-NEXT: cvt.u32.u16 %r6, %rs1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT: ret;
+ %val = call i16 @llvm.nvvm.f32x4.to.e2m1x4.rs.relu.satfinite(<4 x float> %fvec, i32 %rbits)
+ ret i16 %val
+}
diff --git a/llvm/test/CodeGen/NVPTX/wmma-ptx87-sm120a.py b/llvm/test/CodeGen/NVPTX/wmma-ptx87-sm120a.py
index ae781df..40055ae 100644
--- a/llvm/test/CodeGen/NVPTX/wmma-ptx87-sm120a.py
+++ b/llvm/test/CodeGen/NVPTX/wmma-ptx87-sm120a.py
@@ -2,7 +2,7 @@
# RUN: %python %s --ptx=87 --gpu-arch=120 --aa > %t-ptx87-sm_120a.ll
# RUN: llc < %t-ptx87-sm_120a.ll -mtriple=nvptx64 -mcpu=sm_120a -mattr=+ptx87 \
# RUN: | FileCheck %t-ptx87-sm_120a.ll
-# RUN: %if ptxas-12.7 %{ \
+# RUN: %if ptxas-sm_120a && ptxas-isa-8.7 %{ \
# RUN: llc < %t-ptx87-sm_120a.ll -mtriple=nvptx64 -mcpu=sm_120a -mattr=+ptx87 \
# RUN: | %ptxas-verify -arch=sm_120a \
# RUN: %}
diff --git a/llvm/test/CodeGen/NVPTX/wmma.py b/llvm/test/CodeGen/NVPTX/wmma.py
index 6d73bce..8427ae4 100644
--- a/llvm/test/CodeGen/NVPTX/wmma.py
+++ b/llvm/test/CodeGen/NVPTX/wmma.py
@@ -90,6 +90,21 @@ class MMAFrag:
"m16n8k32:b:s8": 2,
"m16n8k32:c:s32": 4,
"m16n8k32:d:s32": 4,
+ # e4m3/e5m2/e3m2/e2m3/e2m1 -> f16/f32 @ m16n8k16/m16n8k32
+ "m16n8k16:a:e4m3": 2,
+ "m16n8k16:a:e5m2": 2,
+ "m16n8k32:a:e4m3": 4,
+ "m16n8k32:a:e5m2": 4,
+ "m16n8k32:a:e3m2": 4,
+ "m16n8k32:a:e2m3": 4,
+ "m16n8k32:a:e2m1": 4,
+ "m16n8k16:b:e4m3": 1,
+ "m16n8k16:b:e5m2": 1,
+ "m16n8k32:b:e4m3": 2,
+ "m16n8k32:b:e5m2": 2,
+ "m16n8k32:b:e3m2": 2,
+ "m16n8k32:b:e2m3": 2,
+ "m16n8k32:b:e2m1": 2,
# mma sp
"m16n8k32:a:bf16": 4,
"m16n8k32:a:f16": 4,
@@ -182,6 +197,18 @@ class MMAFrag:
"m8n8k4:b:f64": 1,
"m8n8k4:c:f64": 2,
"m8n8k4:d:f64": 2,
+ "m16n8k4:a:f64": 2,
+ "m16n8k4:b:f64": 1,
+ "m16n8k4:c:f64": 4,
+ "m16n8k4:d:f64": 4,
+ "m16n8k8:a:f64": 4,
+ "m16n8k8:b:f64": 2,
+ "m16n8k8:c:f64": 4,
+ "m16n8k8:d:f64": 4,
+ "m16n8k16:a:f64": 8,
+ "m16n8k16:b:f64": 4,
+ "m16n8k16:c:f64": 4,
+ "m16n8k16:d:f64": 4,
# tf32 -> s32 @ m16n16k8
"m16n16k8:a:tf32": 4,
"m16n16k8:b:tf32": 4,
@@ -324,7 +351,9 @@ def get_wmma_ops():
def get_mma_ops():
return (
- make_mma_ops(["m8n8k4"], ["f64"], [], ["f64"], [])
+ make_mma_ops(
+ ["m8n8k4", "m16n8k4", "m16n8k8", "m16n8k16"], ["f64"], [], ["f64"], []
+ )
+ make_mma_ops(["m16n8k4", "m16n8k8"], ["tf32"], [], ["f32"], [])
+ make_mma_ops(["m16n8k16", "m16n8k8"], ["bf16"], [], ["f32"], [])
+ make_mma_ops(
@@ -341,6 +370,20 @@ def get_mma_ops():
["m8n8k32", "m16n8k32", "m16n8k64"], ["s4", "u4"], ["s4", "u4"], ["s32"], []
)
+ make_mma_ops(["m8n8k128", "m16n8k128", "m16n8k256"], ["b1"], [], ["s32"], [])
+ + make_mma_ops(
+ ["m16n8k16"],
+ ["e4m3", "e5m2"],
+ ["e4m3", "e5m2"],
+ ["f16", "f32"],
+ ["f16", "f32"],
+ )
+ + make_mma_ops(
+ ["m16n8k32"],
+ ["e4m3", "e5m2", "e3m2", "e2m3", "e2m1"],
+ ["e4m3", "e5m2", "e3m2", "e2m3", "e2m1"],
+ ["f16", "f32"],
+ ["f16", "f32"],
+ )
)
@@ -492,7 +535,7 @@ def is_wmma_variant_supported(op, layout_a, layout_b, rnd, satf):
return True
-def is_mma_variant_supported(op, layout_a, layout_b, satf):
+def is_mma_variant_supported(op, layout_a, layout_b, kind, satf):
if not (
is_type_supported(op.a.mma_type.ptx_type) and is_mma_geom_supported(op.a.geom)
):
@@ -516,13 +559,53 @@ def is_mma_variant_supported(op, layout_a, layout_b, satf):
):
return False
+ if (
+ op.a.geom != "m8n8k4"
+ and op.a.mma_type.ptx_type == "f64"
+ and (ptx_version < 78 or gpu_arch < 90)
+ ):
+ return False
+
# C and D type must be the same
- if op.a.geom == "m16n8k16" and op.c.mma_type.ptx_type != op.d.mma_type.ptx_type:
+ if (
+ op.a.geom in ["m16n8k16", "m16n8k32"]
+ and op.c.mma_type.ptx_type != op.d.mma_type.ptx_type
+ ):
+ return False
+
+ if (
+ op.a.geom in ["m16n8k16", "m16n8k32"]
+ and any(
+ x in ["e4m3", "e5m2"]
+ for x in (op.a.mma_type.ptx_type, op.b.mma_type.ptx_type)
+ )
+ and ptx_version < 87
+ ):
+ return False
+
+ if kind != "" and not (ptx_version >= 87 and gpu_arch >= 120 and aa):
+ return False
+
+ if kind != "" and (
+ op.a.geom != "m16n8k32"
+ or op.a.mma_type.ptx_type not in ["e4m3", "e5m2", "e3m2", "e2m3", "e2m1"]
+ ):
+ return False
+
+ if (
+ kind == ""
+ and op.a.geom in ["m16n8k16", "m16n8k32"]
+ and any(
+ x in ["e3m2", "e2m3", "e2m1"]
+ for x in (op.a.mma_type.ptx_type, op.b.mma_type.ptx_type)
+ )
+ ):
return False
# Require row/col layout for all MMA except m8n8k4 on FP16
if not (op.a.geom == "m8n8k4" and op.a.mma_type.ptx_type == "f16"):
return layout_a == "row" and layout_b == "col"
+
return True
@@ -937,7 +1020,12 @@ define ${ret_ty} @test_${function}(
"""
test_params = params
- test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
+ test_params["intrinsic"] = (
+ Template(intrinsic_template)
+ .substitute(params)
+ .replace("::", ".")
+ .replace("_", ".")
+ )
test_params["function"] = test_params["intrinsic"].replace(".", "_")
test_params["instruction"] = Template(instruction_template).substitute(params)
test_params["ret_ty"] = make_wmma_ld_ret_ty(op.d)
@@ -1002,16 +1090,20 @@ def gen_wmma_mma_tests():
def gen_mma_tests():
- mma_intrinsic_template = "llvm.nvvm.mma${b1op}.${geom}.${alayout}.${blayout}${satf}.${intrinsic_signature}"
- mma_instruction_template = "mma.sync${aligned}.${geom}.${alayout}.${blayout}${satf}.${ptx_signature}${b1op}"
+ mma_intrinsic_template = "llvm.nvvm.mma${b1op}.${geom}.${alayout}.${blayout}${kind}${satf}.${intrinsic_signature}"
+ mma_instruction_template = "mma.sync${aligned}.${geom}.${alayout}.${blayout}${kind}${satf}.${ptx_signature}${b1op}"
generated_items = []
- for op, alayout, blayout, satf in product(
- get_mma_ops(), ["row", "col"], ["row", "col"], [".satfinite", ""]
+ for op, alayout, blayout, kind, satf in product(
+ get_mma_ops(),
+ ["row", "col"],
+ ["row", "col"],
+ ["", ".kind::f8f6f4"],
+ [".satfinite", ""],
):
- if not is_mma_variant_supported(op, alayout, blayout, satf):
+ if not is_mma_variant_supported(op, alayout, blayout, kind, satf):
continue
for b1op in get_b1_ops(op.a.mma_type.ptx_type):
@@ -1024,6 +1116,7 @@ def gen_mma_tests():
"satf": satf,
"geom": op.a.geom,
"b1op": b1op,
+ "kind": kind,
}
intrinsic_template = mma_intrinsic_template
@@ -1105,9 +1198,9 @@ def is_mma_sp_variant_supported(op, metadata, kind, satf):
):
return False
- # C and D type must be the same for m16n8k16/m16n8k32
+ # C and D type must be the same for m16n8k16/m16n8k32/m16n8k64
if (
- op.a.geom in ["m16n8k16", "m16n8k32"]
+ op.a.geom in ["m16n8k16", "m16n8k32", "m16n8k64"]
and op.c.mma_type.ptx_type != op.d.mma_type.ptx_type
):
return False
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/test_counters.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/test_counters.ll
new file mode 100644
index 0000000..b178a56
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/test_counters.ll
@@ -0,0 +1,65 @@
+; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv-vulkan-library %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-vulkan-library %s -o - -filetype=obj | spirv-val --target-env vulkan1.3 %}
+
+; ModuleID = 'test_counters.hlsl'
+source_filename = "test_counters.hlsl"
+
+; CHECK: OpCapability Int8
+; CHECK-DAG: OpName [[OutputBuffer:%[0-9]+]] "OutputBuffer"
+; CHECK-DAG: OpName [[InputBuffer:%[0-9]+]] "InputBuffer"
+; CHECK-DAG: OpName [[OutputBufferCounter:%[0-9]+]] "OutputBuffer.counter"
+; CHECK-DAG: OpName [[InputBufferCounter:%[0-9]+]] "InputBuffer.counter"
+; CHECK-DAG: OpDecorate [[OutputBuffer]] DescriptorSet 0
+; CHECK-DAG: OpDecorate [[OutputBuffer]] Binding 10
+; CHECK-DAG: OpDecorate [[OutputBufferCounter]] DescriptorSet 0
+; CHECK-DAG: OpDecorate [[OutputBufferCounter]] Binding 0
+; CHECK-DAG: OpDecorate [[InputBuffer]] DescriptorSet 0
+; CHECK-DAG: OpDecorate [[InputBuffer]] Binding 1
+; CHECK-DAG: OpDecorate [[InputBufferCounter]] DescriptorSet 0
+; CHECK-DAG: OpDecorate [[InputBufferCounter]] Binding 2
+; CHECK-DAG: [[int:%[0-9]+]] = OpTypeInt 32 0
+; CHECK-DAG: [[zero:%[0-9]+]] = OpConstant [[int]] 0{{$}}
+; CHECK-DAG: [[one:%[0-9]+]] = OpConstant [[int]] 1{{$}}
+; CHECK-DAG: [[minus_one:%[0-9]+]] = OpConstant [[int]] 4294967295
+; CHECK: [[OutputBufferHandle:%[0-9]+]] = OpCopyObject {{%[0-9]+}} [[OutputBuffer]]
+; CHECK: [[InputBufferHandle:%[0-9]+]] = OpCopyObject {{%[0-9]+}} [[InputBuffer]]
+; CHECK: [[InputCounterAC:%[0-9]+]] = OpAccessChain {{%[0-9]+}} [[InputBufferCounter]] [[zero]]
+; CHECK: [[dec:%[0-9]+]] = OpAtomicIAdd [[int]] [[InputCounterAC]] [[one]] [[zero]] [[minus_one]]
+; CHECK: [[iadd:%[0-9]+]] = OpIAdd [[int]] [[dec]] [[minus_one]]
+; CHECK: [[OutputCounterAC:%[0-9]+]] = OpAccessChain {{%[0-9]+}} [[OutputBufferCounter]] [[zero]]
+; CHECK: [[inc:%[0-9]+]] = OpAtomicIAdd [[int]] [[OutputCounterAC]] [[one]] [[zero]] [[one]]
+; CHECK: [[InputAC:%[0-9]+]] = OpAccessChain {{%[0-9]+}} [[InputBufferHandle]] [[zero]] [[iadd]]
+; CHECK: [[load:%[0-9]+]] = OpLoad {{%[0-9]+}} [[InputAC]]
+; CHECK: [[OutputAC:%[0-9]+]] = OpAccessChain {{%[0-9]+}} [[OutputBufferHandle]] [[zero]] [[inc]]
+; CHECK: OpStore [[OutputAC]] [[load]]
+
+
+target triple = "spirv1.6-unknown-vulkan1.3-compute"
+
+@.str = private unnamed_addr constant [13 x i8] c"OutputBuffer\00"
+@.str.2 = private unnamed_addr constant [12 x i8] c"InputBuffer\00"
+
+define void @main() #0 {
+entry:
+ %0 = call target("spirv.VulkanBuffer", [0 x float], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0f32_12_1t(i32 0, i32 10, i32 1, i32 0, ptr @.str)
+ %1 = call target("spirv.VulkanBuffer", i32, 12, 1) @llvm.spv.resource.counterhandlefromimplicitbinding.tspirv.VulkanBuffer_i32_12_1t.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1) %0, i32 0, i32 0)
+ %2 = call target("spirv.VulkanBuffer", [0 x float], 12, 1) @llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_a0f32_12_1t(i32 1, i32 0, i32 1, i32 0, ptr @.str.2)
+ %3 = call target("spirv.VulkanBuffer", i32, 12, 1) @llvm.spv.resource.counterhandlefromimplicitbinding.tspirv.VulkanBuffer_i32_12_1t.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1) %2, i32 2, i32 0)
+ %4 = call i32 @llvm.spv.resource.updatecounter.tspirv.VulkanBuffer_i32_12_1t(target("spirv.VulkanBuffer", i32, 12, 1) %3, i8 -1)
+ %5 = call i32 @llvm.spv.resource.updatecounter.tspirv.VulkanBuffer_i32_12_1t(target("spirv.VulkanBuffer", i32, 12, 1) %1, i8 1)
+ %6 = call ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1) %2, i32 %4)
+ %7 = load float, ptr addrspace(11) %6
+ %8 = call ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1) %0, i32 %5)
+ store float %7, ptr addrspace(11) %8
+ ret void
+}
+
+declare target("spirv.VulkanBuffer", [0 x float], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0f32_12_1t(i32, i32, i32, i32, ptr) #1
+declare target("spirv.VulkanBuffer", i32, 12, 1) @llvm.spv.resource.counterhandlefromimplicitbinding.tspirv.VulkanBuffer_i32_12_1t.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1), i32, i32) #1
+declare target("spirv.VulkanBuffer", [0 x float], 12, 1) @llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_a0f32_12_1t(i32, i32, i32, i32, ptr) #1
+declare i32 @llvm.spv.resource.updatecounter.tspirv.VulkanBuffer_i32_12_1t(target("spirv.VulkanBuffer", i32, 12, 1), i8) #2
+declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1), i32) #1
+
+attributes #0 = { "hlsl.shader"="compute" "hlsl.numthreads"="1,1,1" }
+attributes #1 = { memory(none) }
+attributes #2 = { memory(argmem: readwrite, inaccessiblemem: readwrite) }
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir
index 31de686..92e4588 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-phi.mir
@@ -148,21 +148,21 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
+ ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[COPY2]](s32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s8)
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; CHECK-NEXT: G_BRCOND [[TRUNC]](s1), %bb.2
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s8)
+ ; CHECK-NEXT: G_BRCOND [[TRUNC1]](s1), %bb.2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1.cond.false:
; CHECK-NEXT: successors: %bb.2(0x80000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
- ; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2.cond.end:
- ; CHECK-NEXT: [[PHI:%[0-9]+]]:_(s8) = G_PHI [[TRUNC2]](s8), %bb.1, [[TRUNC1]](s8), %bb.0
- ; CHECK-NEXT: $al = COPY [[PHI]](s8)
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:_(s1) = G_PHI [[TRUNC2]](s1), %bb.1, [[TRUNC]](s1), %bb.0
+ ; CHECK-NEXT: [[EXT:%[0-9]+]]:_(s8) = G_ANYEXT [[PHI]](s1)
+ ; CHECK-NEXT: $al = COPY [[EXT]](s8)
; CHECK-NEXT: RET 0, implicit $al
bb.1.entry:
successors: %bb.3(0x40000000), %bb.2(0x40000000)
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-undef-vec-scaling.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-undef-vec-scaling.mir
new file mode 100644
index 0000000..b02832b
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-undef-vec-scaling.mir
@@ -0,0 +1,32 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=avx2 -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*' %s -o - | FileCheck %s --check-prefixes=CHECK,AVX2
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=sse2 -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*' %s -o - | FileCheck %s --check-prefixes=CHECK,SSE2
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=avx512f -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*' %s -o - | FileCheck %s --check-prefixes=CHECK,AVX512F
+
+
+---
+name: test_basic_g_implicit_def_v8i64
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: test_basic_g_implicit_def_v8i64
+ ; AVX512F: {{%[0-9]+}}:_(<8 x s64>) = G_IMPLICIT_DEF
+ ; AVX2: [[DEF_AVX2:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+ ; AVX2-NEXT: {{%[0-9]+}}:_(<8 x s64>) = G_CONCAT_VECTORS [[DEF_AVX2]](<4 x s64>), [[DEF_AVX2]](<4 x s64>)
+ ; SSE2: [[DEF_SSE2:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
+ ; SSE2-NEXT: {{%[0-9]+}}:_(<8 x s64>) = G_CONCAT_VECTORS [[DEF_SSE2]](<2 x s64>), [[DEF_SSE2]](<2 x s64>), [[DEF_SSE2]](<2 x s64>), [[DEF_SSE2]](<2 x s64>)
+ %0:_(<8 x s64>) = G_IMPLICIT_DEF
+ RET 0, implicit %0
+...
+
+---
+name: test_g_implicit_def_clamp_size
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: test_g_implicit_def_clamp_size
+ ; AVX512F: {{%[0-9]+}}:_(<8 x s64>) = G_IMPLICIT_DEF
+ ; AVX2: {{%[0-9]+}}:_(<4 x s64>) = G_IMPLICIT_DEF
+ ; SSE2: {{%[0-9]+}}:_(<2 x s64>) = G_IMPLICIT_DEF
+ %0:_(<5 x s63>) = G_IMPLICIT_DEF
+ RET 0, implicit %0
+...
+
+
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec256.mir
new file mode 100644
index 0000000..254c1b6
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec256.mir
@@ -0,0 +1,23 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: select_cfb_vec256
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: vecr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: vecr, preferred-register: '', flags: [ ] }
+body: |
+ bb.0:
+ liveins: $ymm0
+
+ ; CHECK-LABEL: name: select_cfb_vec256
+ ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY $ymm0
+ ; CHECK-NOT: G_CONSTANT_FOLD_BARRIER
+ ; CHECK-NEXT: $ymm1 = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $ymm1
+ %0:vecr(<8 x s32>) = COPY $ymm0
+ %1:vecr(<8 x s32>) = G_CONSTANT_FOLD_BARRIER %0
+ $ymm1 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm1
+...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec512.mir
new file mode 100644
index 0000000..3da354b
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier-vec512.mir
@@ -0,0 +1,23 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: select_cfb_vec512
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: vecr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: vecr, preferred-register: '', flags: [ ] }
+body: |
+ bb.0:
+ liveins: $zmm0
+
+ ; CHECK-LABEL: name: select_cfb_vec512
+ ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK-NOT: G_CONSTANT_FOLD_BARRIER
+ ; CHECK-NEXT: $zmm1 = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $zmm1
+ %0:vecr(<8 x s64>) = COPY $zmm0
+ %1:vecr(<8 x s64>) = G_CONSTANT_FOLD_BARRIER %0
+ $zmm1 = COPY %1(<8 x s64>)
+ RET 0, implicit $zmm1
+...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier.mir b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier.mir
new file mode 100644
index 0000000..fa012f9
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-constant-fold-barrier.mir
@@ -0,0 +1,77 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+
+---
+name: select_cfb_scalar_s32
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: gpr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: gpr, preferred-register: '', flags: [ ] }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0:
+ liveins: $edi
+
+ ; CHECK-LABEL: name: select_cfb_scalar_s32
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK-NOT: G_CONSTANT_FOLD_BARRIER
+ ; CHECK-NEXT: $eax = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $eax
+ %0:gpr(s32) = COPY $edi
+ %1:gpr(s32) = G_CONSTANT_FOLD_BARRIER %0
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
+...
+
+---
+name: select_cfb_scalar_s64
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: gpr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: gpr, preferred-register: '', flags: [ ] }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0:
+ liveins: $rdi
+
+ ; CHECK-LABEL: name: select_cfb_scalar_s64
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK-NOT: G_CONSTANT_FOLD_BARRIER
+ ; CHECK-NEXT: $rax = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $rax
+ %0:gpr(s64) = COPY $rdi
+ %1:gpr(s64) = G_CONSTANT_FOLD_BARRIER %0
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
+...
+
+
+---
+name: select_cfb_vec128
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: vecr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: vecr, preferred-register: '', flags: [ ] }
+body: |
+ bb.0:
+ liveins: $xmm0
+
+ ; CHECK-LABEL: name: select_cfb_vec128
+ ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; CHECK-NOT: G_CONSTANT_FOLD_BARRIER
+ ; CHECK-NEXT: $xmm1 = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $xmm1
+ %0:vecr(<4 x s32>) = COPY $xmm0
+ %1:vecr(<4 x s32>) = G_CONSTANT_FOLD_BARRIER %0
+ $xmm1 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm1
+...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec256.mir
new file mode 100644
index 0000000..11251e4
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec256.mir
@@ -0,0 +1,23 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: select_freeze_vec256
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: vecr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: vecr, preferred-register: '', flags: [ ] }
+body: |
+ bb.0:
+ liveins: $ymm0
+
+ ; CHECK-LABEL: name: select_freeze_vec256
+ ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY $ymm0
+ ; CHECK-NOT: G_FREEZE
+ ; CHECK-NEXT: $ymm1 = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $ymm1
+ %0:vecr(<8 x s32>) = COPY $ymm0
+ %1:vecr(<8 x s32>) = G_FREEZE %0
+ $ymm1 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm1
+...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec512.mir
new file mode 100644
index 0000000..bcf299a
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-freeze-vec512.mir
@@ -0,0 +1,23 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: select_freeze_vec512
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: vecr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: vecr, preferred-register: '', flags: [ ] }
+body: |
+ bb.0:
+ liveins: $zmm0
+
+ ; CHECK-LABEL: name: select_freeze_vec512
+ ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK-NOT: G_FREEZE
+ ; CHECK-NEXT: $zmm1 = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $zmm1
+ %0:vecr(<8 x s64>) = COPY $zmm0
+ %1:vecr(<8 x s64>) = G_FREEZE %0
+ $zmm1 = COPY %1(<8 x s64>)
+ RET 0, implicit $zmm1
+...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-freeze.mir b/llvm/test/CodeGen/X86/GlobalISel/select-freeze.mir
new file mode 100644
index 0000000..cf5ad47
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-freeze.mir
@@ -0,0 +1,77 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+
+---
+name: select_freeze_scalar_s32
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: gpr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: gpr, preferred-register: '', flags: [ ] }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0:
+ liveins: $edi
+
+ ; CHECK-LABEL: name: select_freeze_scalar_s32
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK-NOT: G_FREEZE
+ ; CHECK-NEXT: $eax = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $eax
+ %0:gpr(s32) = COPY $edi
+ %1:gpr(s32) = G_FREEZE %0
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
+...
+
+---
+name: select_freeze_scalar_s64
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: gpr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: gpr, preferred-register: '', flags: [ ] }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0:
+ liveins: $rdi
+
+ ; CHECK-LABEL: name: select_freeze_scalar_s64
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK-NOT: G_FREEZE
+ ; CHECK-NEXT: $rax = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $rax
+ %0:gpr(s64) = COPY $rdi
+ %1:gpr(s64) = G_FREEZE %0
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
+...
+
+
+---
+name: select_freeze_vec128
+legalized: true
+regBankSelected: true
+registers:
+ - { id: 0, class: vecr, preferred-register: '', flags: [ ] }
+ - { id: 1, class: vecr, preferred-register: '', flags: [ ] }
+body: |
+ bb.0:
+ liveins: $xmm0
+
+ ; CHECK-LABEL: name: select_freeze_vec128
+ ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; CHECK-NOT: G_FREEZE
+ ; CHECK-NEXT: $xmm1 = COPY [[COPY]]
+ ; CHECK-NEXT: RET 0, implicit $xmm1
+ %0:vecr(<4 x s32>) = COPY $xmm0
+ %1:vecr(<4 x s32>) = G_FREEZE %0
+ $xmm1 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm1
+...
diff --git a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll
index 3349d31..b2064b1 100644
--- a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll
+++ b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll
@@ -317,13 +317,13 @@ define void @with_nounwind(i1 %cond) nounwind personality ptr @my_personality {
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
; CHECK-NEXT: LBB4_1: ## %throw
-; CHECK-NEXT: Ltmp0:
+; CHECK-NEXT: Ltmp0: ## EH_LABEL
; CHECK-NEXT: callq _throw_exception
-; CHECK-NEXT: Ltmp1:
+; CHECK-NEXT: Ltmp1: ## EH_LABEL
; CHECK-NEXT: ## %bb.2: ## %unreachable
; CHECK-NEXT: ud2
; CHECK-NEXT: LBB4_3: ## %landing
-; CHECK-NEXT: Ltmp2:
+; CHECK-NEXT: Ltmp2: ## EH_LABEL
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
; CHECK-NEXT: Lfunc_end0:
@@ -340,12 +340,12 @@ define void @with_nounwind(i1 %cond) nounwind personality ptr @my_personality {
; NOCOMPACTUNWIND-NEXT: retq
; NOCOMPACTUNWIND-NEXT: .LBB4_1: # %throw
; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 16
-; NOCOMPACTUNWIND-NEXT: .Ltmp0:
+; NOCOMPACTUNWIND-NEXT: .Ltmp0: # EH_LABEL
; NOCOMPACTUNWIND-NEXT: callq throw_exception@PLT
-; NOCOMPACTUNWIND-NEXT: .Ltmp1:
+; NOCOMPACTUNWIND-NEXT: .Ltmp1: # EH_LABEL
; NOCOMPACTUNWIND-NEXT: # %bb.2: # %unreachable
; NOCOMPACTUNWIND-NEXT: .LBB4_3: # %landing
-; NOCOMPACTUNWIND-NEXT: .Ltmp2:
+; NOCOMPACTUNWIND-NEXT: .Ltmp2: # EH_LABEL
; NOCOMPACTUNWIND-NEXT: popq %rax
; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 8
; NOCOMPACTUNWIND-NEXT: retq
@@ -379,9 +379,9 @@ define void @with_nounwind_same_succ(i1 %cond) nounwind personality ptr @my_pers
; CHECK-NEXT: ## %bb.1: ## %throw
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: Ltmp3:
+; CHECK-NEXT: Ltmp3: ## EH_LABEL
; CHECK-NEXT: callq _throw_exception
-; CHECK-NEXT: Ltmp4:
+; CHECK-NEXT: Ltmp4: ## EH_LABEL
; CHECK-NEXT: LBB5_3: ## %fallthrough
; CHECK-NEXT: ## InlineAsm Start
; CHECK-NEXT: nop
@@ -390,7 +390,7 @@ define void @with_nounwind_same_succ(i1 %cond) nounwind personality ptr @my_pers
; CHECK-NEXT: LBB5_4: ## %return
; CHECK-NEXT: retq
; CHECK-NEXT: LBB5_2: ## %landing
-; CHECK-NEXT: Ltmp5:
+; CHECK-NEXT: Ltmp5: ## EH_LABEL
; CHECK-NEXT: jmp LBB5_3
; CHECK-NEXT: Lfunc_end1:
;
@@ -401,9 +401,9 @@ define void @with_nounwind_same_succ(i1 %cond) nounwind personality ptr @my_pers
; NOCOMPACTUNWIND-NEXT: # %bb.1: # %throw
; NOCOMPACTUNWIND-NEXT: pushq %rax
; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 16
-; NOCOMPACTUNWIND-NEXT: .Ltmp3:
+; NOCOMPACTUNWIND-NEXT: .Ltmp3: # EH_LABEL
; NOCOMPACTUNWIND-NEXT: callq throw_exception@PLT
-; NOCOMPACTUNWIND-NEXT: .Ltmp4:
+; NOCOMPACTUNWIND-NEXT: .Ltmp4: # EH_LABEL
; NOCOMPACTUNWIND-NEXT: .LBB5_3: # %fallthrough
; NOCOMPACTUNWIND-NEXT: #APP
; NOCOMPACTUNWIND-NEXT: nop
@@ -414,7 +414,7 @@ define void @with_nounwind_same_succ(i1 %cond) nounwind personality ptr @my_pers
; NOCOMPACTUNWIND-NEXT: retq
; NOCOMPACTUNWIND-NEXT: .LBB5_2: # %landing
; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 16
-; NOCOMPACTUNWIND-NEXT: .Ltmp5:
+; NOCOMPACTUNWIND-NEXT: .Ltmp5: # EH_LABEL
; NOCOMPACTUNWIND-NEXT: jmp .LBB5_3
entry:
br i1 %cond, label %throw, label %return
diff --git a/llvm/test/ThinLTO/X86/memprof-basic.ll b/llvm/test/ThinLTO/X86/memprof-basic.ll
index 0ff0ce0..537e1b8 100644
--- a/llvm/test/ThinLTO/X86/memprof-basic.ll
+++ b/llvm/test/ThinLTO/X86/memprof-basic.ll
@@ -103,7 +103,9 @@ declare i32 @sleep()
define internal ptr @_Z3barv() #0 !dbg !15 {
entry:
- %call = call ptr @_Znam(i64 0), !memprof !2, !callsite !7
+ ;; Use an ambiguous attribute for this allocation, which is now added to such
+ ;; allocations during matching. It should not affect cloning.
+ %call = call ptr @_Znam(i64 0) #1, !memprof !2, !callsite !7
ret ptr null
}
@@ -125,6 +127,7 @@ entry:
uselistorder ptr @_Z3foov, { 1, 0 }
attributes #0 = { noinline optnone }
+attributes #1 = { "memprof"="ambiguous" }
!llvm.dbg.cu = !{!13}
!llvm.module.flags = !{!20, !21}
diff --git a/llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll b/llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll
index d0e7c1c2..e1e1611 100644
--- a/llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll
+++ b/llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll
@@ -80,8 +80,8 @@ cleanup2:
; CHECK: cleanup2.corodispatch:
; CHECK: %1 = phi i8 [ 0, %handler2 ], [ 1, %catch.dispatch.2 ]
; CHECK: %2 = cleanuppad within %h1 []
-; CHECK: %switch = icmp ult i8 %1, 1
-; CHECK: br i1 %switch, label %cleanup2.from.handler2, label %cleanup2.from.catch.dispatch.2
+; CHECK: %3 = icmp eq i8 %1, 0
+; CHECK: br i1 %3, label %cleanup2.from.handler2, label %cleanup2.from.catch.dispatch.2
; CHECK: cleanup2.from.handler2:
; CHECK: %valueB.reload = load i32, ptr %valueB.spill.addr, align 4
diff --git a/llvm/test/Transforms/PGOProfile/memprof.ll b/llvm/test/Transforms/PGOProfile/memprof.ll
index c69d031..f6a89a8 100644
--- a/llvm/test/Transforms/PGOProfile/memprof.ll
+++ b/llvm/test/Transforms/PGOProfile/memprof.ll
@@ -38,7 +38,7 @@
; ALL-NOT: no profile data available for function
;; Using a memprof-only profile for memprof-use should only give memprof metadata
-; RUN: opt < %s -passes='memprof-use<profile-filename=%t.memprofdata>' -pgo-warn-missing-function -S -memprof-print-match-info -stats 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,MEMPROFONLY,MEMPROFMATCHINFO,MEMPROFSTATS
+; RUN: opt < %s -passes='memprof-use<profile-filename=%t.memprofdata>' -pgo-warn-missing-function -S -memprof-print-match-info -stats 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,MEMPROFONLY,MEMPROFMATCHINFO,MEMPROFSTATS,AMBIG
; There should not be any PGO metadata
; MEMPROFONLY-NOT: !prof
@@ -51,10 +51,10 @@
;; Test the same thing but by passing the memory profile through to a default
;; pipeline via -memory-profile-file=, which should cause the necessary field
;; of the PGOOptions structure to be populated with the profile filename.
-; RUN: opt < %s -passes='default<O2>' -memory-profile-file=%t.memprofdata -pgo-warn-missing-function -S 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,MEMPROFONLY
+; RUN: opt < %s -passes='default<O2>' -memory-profile-file=%t.memprofdata -pgo-warn-missing-function -S 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,MEMPROFONLY,AMBIG
;; Using a pgo+memprof profile for memprof-use should only give memprof metadata
-; RUN: opt < %s -passes='memprof-use<profile-filename=%t.pgomemprofdata>' -pgo-warn-missing-function -S 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,MEMPROFONLY
+; RUN: opt < %s -passes='memprof-use<profile-filename=%t.pgomemprofdata>' -pgo-warn-missing-function -S 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,MEMPROFONLY,AMBIG
;; Using a pgo-only profile for memprof-use should give an error
; RUN: not opt < %s -passes='memprof-use<profile-filename=%t.pgoprofdata>' -S 2>&1 | FileCheck %s --check-prefixes=MEMPROFWITHPGOONLY
@@ -72,7 +72,7 @@
;; Using a pgo+memprof profile for both memprof-use and pgo-instr-use should
;; give both memprof and pgo metadata.
-; RUN: opt < %s -passes='pgo-instr-use,memprof-use<profile-filename=%t.pgomemprofdata>' -pgo-test-profile-file=%t.pgomemprofdata -pgo-warn-missing-function -S 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,PGO
+; RUN: opt < %s -passes='pgo-instr-use,memprof-use<profile-filename=%t.pgomemprofdata>' -pgo-test-profile-file=%t.pgomemprofdata -pgo-warn-missing-function -S 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,PGO,AMBIG
;; Check that the total sizes are reported if requested. A message should be
;; emitted for the pruned context. Also check that remarks are emitted for the
@@ -108,7 +108,11 @@
;; However, with the same threshold, but hot hints not enabled, it should be
;; notcold again.
-; RUN: opt < %s -passes='memprof-use<profile-filename=%t.memprofdata>' -pgo-warn-missing-function -S -memprof-min-ave-lifetime-access-density-hot-threshold=0 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL
+; RUN: opt < %s -passes='memprof-use<profile-filename=%t.memprofdata>' -pgo-warn-missing-function -S -memprof-min-ave-lifetime-access-density-hot-threshold=0 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,AMBIG
+
+;; Test that we don't get an ambiguous memprof attribute when
+;; -memprof-ambiguous-attributes is disabled.
+; RUN: opt < %s -passes='memprof-use<profile-filename=%t.memprofdata>' -pgo-warn-missing-function -S -memprof-ambiguous-attributes=false 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,NOAMBIG
; MEMPROFMATCHINFO: MemProf notcold context with id 1093248920606587996 has total profiled size 10 is matched with 1 frames
; MEMPROFMATCHINFO: MemProf notcold context with id 5725971306423925017 has total profiled size 10 is matched with 1 frames
@@ -140,7 +144,7 @@ target triple = "x86_64-unknown-linux-gnu"
; PGO: !prof
define dso_local noundef ptr @_Z3foov() #0 !dbg !10 {
entry:
- ; MEMPROF: call {{.*}} @_Znam{{.*}} !memprof ![[M1:[0-9]+]], !callsite ![[C1:[0-9]+]]
+ ; MEMPROF: call {{.*}} @_Znam{{.*}} #[[A0:[0-9]+]]{{.*}} !memprof ![[M1:[0-9]+]], !callsite ![[C1:[0-9]+]]
; MEMPROFNOCOLINFO: call {{.*}} @_Znam{{.*}} !memprof ![[M1:[0-9]+]], !callsite ![[C1:[0-9]+]]
%call = call noalias noundef nonnull ptr @_Znam(i64 noundef 10) #6, !dbg !13
ret ptr %call, !dbg !14
@@ -364,6 +368,9 @@ for.end: ; preds = %for.cond
ret i32 0, !dbg !103
}
+;; We optionally apply an ambiguous memprof attribute to ambiguous allocations
+; AMBIG: #[[A0]] = { builtin allocsize(0) "memprof"="ambiguous" }
+; NOAMBIG: #[[A0]] = { builtin allocsize(0) }
; MEMPROF: #[[A1]] = { builtin allocsize(0) "memprof"="notcold" }
; MEMPROF: #[[A2]] = { builtin allocsize(0) "memprof"="cold" }
; MEMPROF: ![[M1]] = !{![[MIB1:[0-9]+]], ![[MIB2:[0-9]+]], ![[MIB3:[0-9]+]], ![[MIB4:[0-9]+]]}
diff --git a/llvm/test/Transforms/SCCP/relax-range-checks.ll b/llvm/test/Transforms/SCCP/relax-range-checks.ll
index 90722f3..34e4813 100644
--- a/llvm/test/Transforms/SCCP/relax-range-checks.ll
+++ b/llvm/test/Transforms/SCCP/relax-range-checks.ll
@@ -89,4 +89,28 @@ define i1 @relax_range_check_multiuse(i8 range(i8 0, 5) %x) {
ret i1 %ret
}
+define i1 @range_check_to_icmp_eq1(i32 range(i32 0, 4) %x) {
+; CHECK-LABEL: define i1 @range_check_to_icmp_eq1(
+; CHECK-SAME: i32 range(i32 0, 4) [[X:%.*]]) {
+; CHECK-NEXT: [[OFF:%.*]] = add nsw i32 [[X]], -3
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 3
+; CHECK-NEXT: ret i1 [[TMP1]]
+;
+ %off = add nsw i32 %x, -3
+ %cmp = icmp ult i32 %off, 2
+ ret i1 %cmp
+}
+
+define i1 @range_check_to_icmp_eq2(i32 range(i32 -1, 2) %x) {
+; CHECK-LABEL: define i1 @range_check_to_icmp_eq2(
+; CHECK-SAME: i32 range(i32 -1, 2) [[X:%.*]]) {
+; CHECK-NEXT: [[OFF:%.*]] = add nsw i32 [[X]], -1
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], 1
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %off = add nsw i32 %x, -1
+ %cmp = icmp ult i32 %off, -2
+ ret i1 %cmp
+}
+
declare void @use(i8)
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll
index 655db54..a079203 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll
@@ -10,14 +10,10 @@ define void @test() {
; CHECK-NEXT: [[SUB4_I_I65_US:%.*]] = or i64 0, 1
; CHECK-NEXT: br label [[BODY:%.*]]
; CHECK: body:
-; CHECK-NEXT: [[ADD_I_I62_US:%.*]] = shl i64 0, 0
-; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> <i64 poison, i64 1>, i64 [[ADD_I_I62_US]], i32 0
-; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i64> zeroinitializer, [[TMP0]]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [[CLASS_A:%.*]], <2 x ptr> zeroinitializer, <2 x i64> [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> [[TMP2]], i32 4, <2 x i1> splat (i1 true), <2 x i32> poison)
-; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i32> [[TMP3]], i32 0
-; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i32> [[TMP3]], i32 1
-; CHECK-NEXT: [[CMP_I_I_I_I67_US:%.*]] = icmp slt i32 [[TMP4]], [[TMP5]]
+; CHECK-NEXT: [[TMP0:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> getelementptr ([[CLASS_A:%.*]], <2 x ptr> zeroinitializer, <2 x i64> <i64 0, i64 1>), i32 4, <2 x i1> splat (i1 true), <2 x i32> poison)
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i32> [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i32> [[TMP0]], i32 1
+; CHECK-NEXT: [[CMP_I_I_I_I67_US:%.*]] = icmp slt i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[SPEC_SELECT_I_I68_US:%.*]] = select i1 false, i64 [[SUB4_I_I65_US]], i64 0
; CHECK-NEXT: br label [[BODY]]
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/ext-used-scalar-different-bitwidth.ll b/llvm/test/Transforms/SLPVectorizer/X86/ext-used-scalar-different-bitwidth.ll
index 7758596..87f2cca 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/ext-used-scalar-different-bitwidth.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/ext-used-scalar-different-bitwidth.ll
@@ -8,8 +8,8 @@ define i32 @test() {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: store i32 152, ptr @f, align 4
; CHECK-NEXT: [[AGG_TMP_SROA_0_0_COPYLOAD_I:%.*]] = load i32, ptr @f, align 4
-; CHECK-NEXT: [[ADD_I_I:%.*]] = shl i32 [[AGG_TMP_SROA_0_0_COPYLOAD_I]], 24
-; CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i32> <i32 poison, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080>, i32 [[ADD_I_I]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i32> <i32 poison, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080>, i32 [[AGG_TMP_SROA_0_0_COPYLOAD_I]], i32 0
+; CHECK-NEXT: [[TMP0:%.*]] = shl <8 x i32> [[TMP3]], <i32 24, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
; CHECK-NEXT: [[TMP1:%.*]] = add <8 x i32> <i32 83886080, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, [[TMP0]]
; CHECK-NEXT: [[TMP2:%.*]] = ashr <8 x i32> [[TMP1]], splat (i32 24)
; CHECK-NEXT: [[TMP5:%.*]] = and <8 x i32> [[TMP2]], <i32 66440127, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll b/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll
index 75aec45..3e0a374 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll
@@ -247,32 +247,12 @@ entry:
}
define void @shl0(ptr noalias %dst, ptr noalias %src) {
-; NON-POW2-LABEL: @shl0(
-; NON-POW2-NEXT: entry:
-; NON-POW2-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 1
-; NON-POW2-NEXT: [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4
-; NON-POW2-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 1
-; NON-POW2-NEXT: store i32 [[TMP0]], ptr [[DST]], align 4
-; NON-POW2-NEXT: [[TMP1:%.*]] = load <3 x i32>, ptr [[INCDEC_PTR]], align 4
-; NON-POW2-NEXT: [[TMP2:%.*]] = shl <3 x i32> [[TMP1]], <i32 1, i32 2, i32 3>
-; NON-POW2-NEXT: store <3 x i32> [[TMP2]], ptr [[INCDEC_PTR1]], align 4
-; NON-POW2-NEXT: ret void
-;
-; POW2-ONLY-LABEL: @shl0(
-; POW2-ONLY-NEXT: entry:
-; POW2-ONLY-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 1
-; POW2-ONLY-NEXT: [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4
-; POW2-ONLY-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 1
-; POW2-ONLY-NEXT: store i32 [[TMP0]], ptr [[DST]], align 4
-; POW2-ONLY-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 3
-; POW2-ONLY-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 3
-; POW2-ONLY-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr [[INCDEC_PTR]], align 4
-; POW2-ONLY-NEXT: [[TMP2:%.*]] = shl <2 x i32> [[TMP1]], <i32 1, i32 2>
-; POW2-ONLY-NEXT: store <2 x i32> [[TMP2]], ptr [[INCDEC_PTR1]], align 4
-; POW2-ONLY-NEXT: [[TMP3:%.*]] = load i32, ptr [[INCDEC_PTR4]], align 4
-; POW2-ONLY-NEXT: [[SHL8:%.*]] = shl i32 [[TMP3]], 3
-; POW2-ONLY-NEXT: store i32 [[SHL8]], ptr [[INCDEC_PTR6]], align 4
-; POW2-ONLY-NEXT: ret void
+; CHECK-LABEL: @shl0(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr [[SRC:%.*]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = shl <4 x i32> [[TMP0]], <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: store <4 x i32> [[TMP1]], ptr [[DST:%.*]], align 4
+; CHECK-NEXT: ret void
;
entry:
%incdec.ptr = getelementptr inbounds i32, ptr %src, i64 1
diff --git a/llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll b/llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll
index a5b1e9b..769b360 100644
--- a/llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll
+++ b/llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll
@@ -1,25 +1,44 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
-; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s %}
-; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer < %s -mtriple=aarch64-unknown-linux-gnu | FileCheck %s %}
+; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=X86 %}
+; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer < %s -mtriple=aarch64-unknown-linux-gnu | FileCheck %s --check-prefix=AARCH64 %}
define i1 @test(i32 %0, i32 %1, i32 %p) {
-; CHECK-LABEL: define i1 @test(
-; CHECK-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]], i32 [[P:%.*]]) {
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
-; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1]], i32 0
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP4:%.*]] = shl <4 x i32> zeroinitializer, [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = icmp slt <4 x i32> [[TMP4]], zeroinitializer
-; CHECK-NEXT: [[CMP6:%.*]] = icmp slt i32 0, [[P]]
-; CHECK-NEXT: [[TMP6:%.*]] = freeze <4 x i1> [[TMP5]]
-; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
-; CHECK-NEXT: [[OP_RDX:%.*]] = select i1 [[TMP7]], i1 true, i1 [[CMP6]]
-; CHECK-NEXT: [[OP_RDX1:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP1]]
-; CHECK-NEXT: [[TMP8:%.*]] = freeze i1 [[OP_RDX]]
-; CHECK-NEXT: [[OP_RDX2:%.*]] = select i1 [[TMP8]], i1 true, i1 [[OP_RDX1]]
-; CHECK-NEXT: ret i1 [[OP_RDX2]]
+; X86-LABEL: define i1 @test(
+; X86-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]], i32 [[P:%.*]]) {
+; X86-NEXT: entry:
+; X86-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
+; X86-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1]], i32 0
+; X86-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <4 x i32> zeroinitializer
+; X86-NEXT: [[TMP4:%.*]] = shl <4 x i32> zeroinitializer, [[TMP3]]
+; X86-NEXT: [[TMP5:%.*]] = icmp slt <4 x i32> [[TMP4]], zeroinitializer
+; X86-NEXT: [[CMP6:%.*]] = icmp slt i32 0, [[P]]
+; X86-NEXT: [[TMP6:%.*]] = freeze <4 x i1> [[TMP5]]
+; X86-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
+; X86-NEXT: [[OP_RDX:%.*]] = select i1 [[TMP7]], i1 true, i1 [[CMP6]]
+; X86-NEXT: [[OP_RDX1:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP1]]
+; X86-NEXT: [[TMP8:%.*]] = freeze i1 [[OP_RDX]]
+; X86-NEXT: [[OP_RDX2:%.*]] = select i1 [[TMP8]], i1 true, i1 [[OP_RDX1]]
+; X86-NEXT: ret i1 [[OP_RDX2]]
+;
+; AARCH64-LABEL: define i1 @test(
+; AARCH64-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]], i32 [[P:%.*]]) {
+; AARCH64-NEXT: entry:
+; AARCH64-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
+; AARCH64-NEXT: [[SHL4:%.*]] = shl i32 0, [[TMP1]]
+; AARCH64-NEXT: [[CMP5:%.*]] = icmp slt i32 [[SHL4]], 0
+; AARCH64-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> <i32 0, i32 poison, i32 poison, i32 poison>, i32 [[TMP1]], i32 1
+; AARCH64-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <4 x i32> <i32 0, i32 1, i32 1, i32 1>
+; AARCH64-NEXT: [[TMP4:%.*]] = shl <4 x i32> zeroinitializer, [[TMP3]]
+; AARCH64-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> <i32 poison, i32 0, i32 0, i32 0>, i32 [[P]], i32 0
+; AARCH64-NEXT: [[TMP6:%.*]] = icmp slt <4 x i32> [[TMP4]], [[TMP5]]
+; AARCH64-NEXT: [[TMP7:%.*]] = freeze <4 x i1> [[TMP6]]
+; AARCH64-NEXT: [[TMP8:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP7]])
+; AARCH64-NEXT: [[OP_RDX:%.*]] = select i1 [[TMP8]], i1 true, i1 [[CMP5]]
+; AARCH64-NEXT: [[OP_RDX1:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP1]]
+; AARCH64-NEXT: [[TMP9:%.*]] = freeze i1 [[OP_RDX]]
+; AARCH64-NEXT: [[OP_RDX2:%.*]] = select i1 [[TMP9]], i1 true, i1 [[OP_RDX1]]
+; AARCH64-NEXT: ret i1 [[OP_RDX2]]
;
entry:
%cmp1 = icmp sgt i32 %0, 0
diff --git a/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll b/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll
index 4a457cc..a0e29dd 100644
--- a/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll
+++ b/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll
@@ -7,8 +7,7 @@ declare void @foo(i32)
define void @test(i1 %a) {
; CHECK-LABEL: define void @test(
; CHECK-SAME: i1 [[A:%.*]]) {
-; CHECK-NEXT: [[A_OFF:%.*]] = add i1 [[A]], true
-; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i1 [[A_OFF]], true
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i1 [[A]], true
; CHECK-NEXT: br i1 [[SWITCH]], label [[TRUE:%.*]], label [[FALSE:%.*]]
; CHECK: common.ret:
; CHECK-NEXT: ret void
@@ -209,8 +208,7 @@ define void @test5(i8 %a) {
; CHECK-SAME: i8 [[A:%.*]]) {
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[A]], 2
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: [[A_OFF:%.*]] = add i8 [[A]], -1
-; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[A_OFF]], 1
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i8 [[A]], 1
; CHECK-NEXT: br i1 [[SWITCH]], label [[TRUE:%.*]], label [[FALSE:%.*]]
; CHECK: common.ret:
; CHECK-NEXT: ret void
@@ -243,8 +241,7 @@ define void @test6(i8 %a) {
; CHECK-NEXT: [[AND:%.*]] = and i8 [[A]], -2
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[AND]], -2
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: [[A_OFF:%.*]] = add i8 [[A]], 1
-; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[A_OFF]], 1
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i8 [[A]], -1
; CHECK-NEXT: br i1 [[SWITCH]], label [[TRUE:%.*]], label [[FALSE:%.*]]
; CHECK: common.ret:
; CHECK-NEXT: ret void
@@ -279,8 +276,7 @@ define void @test7(i8 %a) {
; CHECK-NEXT: [[AND:%.*]] = and i8 [[A]], -2
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[AND]], -2
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: [[A_OFF:%.*]] = add i8 [[A]], 1
-; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[A_OFF]], 1
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i8 [[A]], -1
; CHECK-NEXT: br i1 [[SWITCH]], label [[TRUE:%.*]], label [[FALSE:%.*]]
; CHECK: common.ret:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll b/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll
index 8f2ae2d..0fc3c19 100644
--- a/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll
+++ b/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll
@@ -188,4 +188,217 @@ exit:
ret void
}
+define i32 @wrapping_known_range(i8 range(i8 0, 6) %arg) {
+; CHECK-LABEL: @wrapping_known_range(
+; CHECK-NEXT: [[ARG_OFF:%.*]] = add i8 [[ARG:%.*]], -1
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[ARG_OFF]], 3
+; CHECK-NEXT: br i1 [[SWITCH]], label [[ELSE:%.*]], label [[IF:%.*]]
+; CHECK: common.ret:
+; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[I0:%.*]], [[IF]] ], [ [[I1:%.*]], [[ELSE]] ]
+; CHECK-NEXT: ret i32 [[COMMON_RET_OP]]
+; CHECK: if:
+; CHECK-NEXT: [[I0]] = call i32 @f(i32 0)
+; CHECK-NEXT: br label [[COMMON_RET:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[I1]] = call i32 @f(i32 1)
+; CHECK-NEXT: br label [[COMMON_RET]]
+;
+ switch i8 %arg, label %else [
+ i8 0, label %if
+ i8 4, label %if
+ i8 5, label %if
+ ]
+
+if:
+ %i0 = call i32 @f(i32 0)
+ ret i32 %i0
+
+else:
+ %i1 = call i32 @f(i32 1)
+ ret i32 %i1
+}
+
+define i32 @wrapping_known_range_2(i8 range(i8 0, 6) %arg) {
+; CHECK-LABEL: @wrapping_known_range_2(
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i8 [[ARG:%.*]], 1
+; CHECK-NEXT: br i1 [[SWITCH]], label [[ELSE:%.*]], label [[IF:%.*]]
+; CHECK: common.ret:
+; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[I0:%.*]], [[IF]] ], [ [[I1:%.*]], [[ELSE]] ]
+; CHECK-NEXT: ret i32 [[COMMON_RET_OP]]
+; CHECK: if:
+; CHECK-NEXT: [[I0]] = call i32 @f(i32 0)
+; CHECK-NEXT: br label [[COMMON_RET:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[I1]] = call i32 @f(i32 1)
+; CHECK-NEXT: br label [[COMMON_RET]]
+;
+ switch i8 %arg, label %else [
+ i8 0, label %if
+ i8 2, label %if
+ i8 3, label %if
+ i8 4, label %if
+ i8 5, label %if
+ ]
+
+if:
+ %i0 = call i32 @f(i32 0)
+ ret i32 %i0
+
+else:
+ %i1 = call i32 @f(i32 1)
+ ret i32 %i1
+}
+
+define i32 @wrapping_range(i8 %arg) {
+; CHECK-LABEL: @wrapping_range(
+; CHECK-NEXT: [[ARG_OFF:%.*]] = add i8 [[ARG:%.*]], -1
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[ARG_OFF]], -4
+; CHECK-NEXT: br i1 [[SWITCH]], label [[ELSE:%.*]], label [[IF:%.*]]
+; CHECK: common.ret:
+; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[I0:%.*]], [[IF]] ], [ [[I1:%.*]], [[ELSE]] ]
+; CHECK-NEXT: ret i32 [[COMMON_RET_OP]]
+; CHECK: if:
+; CHECK-NEXT: [[I0]] = call i32 @f(i32 0)
+; CHECK-NEXT: br label [[COMMON_RET:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[I1]] = call i32 @f(i32 1)
+; CHECK-NEXT: br label [[COMMON_RET]]
+;
+ switch i8 %arg, label %else [
+ i8 0, label %if
+ i8 -3, label %if
+ i8 -2, label %if
+ i8 -1, label %if
+ ]
+
+if:
+ %i0 = call i32 @f(i32 0)
+ ret i32 %i0
+
+else:
+ %i1 = call i32 @f(i32 1)
+ ret i32 %i1
+}
+
+define i8 @wrapping_range_phi(i8 %arg) {
+; CHECK-LABEL: @wrapping_range_phi(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARG_OFF:%.*]] = add i8 [[ARG:%.*]], -1
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[ARG_OFF]], -2
+; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[SWITCH]], i8 0, i8 1
+; CHECK-NEXT: ret i8 [[SPEC_SELECT]]
+;
+entry:
+ switch i8 %arg, label %else [
+ i8 0, label %if
+ i8 -1, label %if
+ ]
+
+if:
+ %i = phi i8 [ 0, %else ], [ 1, %entry ], [ 1, %entry ]
+ ret i8 %i
+
+else:
+ br label %if
+}
+
+define i32 @no_continuous_wrapping_range(i8 %arg) {
+; CHECK-LABEL: @no_continuous_wrapping_range(
+; CHECK-NEXT: switch i8 [[ARG:%.*]], label [[ELSE:%.*]] [
+; CHECK-NEXT: i8 0, label [[IF:%.*]]
+; CHECK-NEXT: i8 -3, label [[IF]]
+; CHECK-NEXT: i8 -1, label [[IF]]
+; CHECK-NEXT: ]
+; CHECK: common.ret:
+; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[I0:%.*]], [[IF]] ], [ [[I1:%.*]], [[ELSE]] ]
+; CHECK-NEXT: ret i32 [[COMMON_RET_OP]]
+; CHECK: if:
+; CHECK-NEXT: [[I0]] = call i32 @f(i32 0)
+; CHECK-NEXT: br label [[COMMON_RET:%.*]]
+; CHECK: else:
+; CHECK-NEXT: [[I1]] = call i32 @f(i32 1)
+; CHECK-NEXT: br label [[COMMON_RET]]
+;
+ switch i8 %arg, label %else [
+ i8 0, label %if
+ i8 -3, label %if
+ i8 -1, label %if
+ ]
+
+if:
+ %i0 = call i32 @f(i32 0)
+ ret i32 %i0
+
+else:
+ %i1 = call i32 @f(i32 1)
+ ret i32 %i1
+}
+
+define i32 @one_case_1(i32 %x) {
+; CHECK-LABEL: @one_case_1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i32 [[X:%.*]], 10
+; CHECK-NEXT: br i1 [[SWITCH]], label [[A:%.*]], label [[B:%.*]]
+; CHECK: common.ret:
+; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[TMP0:%.*]], [[B]] ], [ [[TMP1:%.*]], [[A]] ]
+; CHECK-NEXT: ret i32 [[COMMON_RET_OP]]
+; CHECK: a:
+; CHECK-NEXT: [[TMP0]] = call i32 @f(i32 0)
+; CHECK-NEXT: br label [[COMMON_RET:%.*]]
+; CHECK: b:
+; CHECK-NEXT: [[TMP1]] = call i32 @f(i32 1)
+; CHECK-NEXT: br label [[COMMON_RET]]
+;
+entry:
+ switch i32 %x, label %unreachable [
+ i32 5, label %a
+ i32 6, label %a
+ i32 7, label %a
+ i32 10, label %b
+ ]
+
+unreachable:
+ unreachable
+a:
+ %0 = call i32 @f(i32 0)
+ ret i32 %0
+b:
+ %1 = call i32 @f(i32 1)
+ ret i32 %1
+}
+
+define i32 @one_case_2(i32 %x) {
+; CHECK-LABEL: @one_case_2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SWITCH:%.*]] = icmp eq i32 [[X:%.*]], 5
+; CHECK-NEXT: br i1 [[SWITCH]], label [[A:%.*]], label [[B:%.*]]
+; CHECK: common.ret:
+; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[TMP0:%.*]], [[A]] ], [ [[TMP1:%.*]], [[B]] ]
+; CHECK-NEXT: ret i32 [[COMMON_RET_OP]]
+; CHECK: a:
+; CHECK-NEXT: [[TMP0]] = call i32 @f(i32 0)
+; CHECK-NEXT: br label [[COMMON_RET:%.*]]
+; CHECK: b:
+; CHECK-NEXT: [[TMP1]] = call i32 @f(i32 1)
+; CHECK-NEXT: br label [[COMMON_RET]]
+;
+entry:
+ switch i32 %x, label %unreachable [
+ i32 5, label %a
+ i32 10, label %b
+ i32 11, label %b
+ i32 12, label %b
+ i32 13, label %b
+ ]
+
+unreachable:
+ unreachable
+a:
+ %0 = call i32 @f(i32 0)
+ ret i32 %0
+b:
+ %1 = call i32 @f(i32 1)
+ ret i32 %1
+}
+
declare void @bar(ptr nonnull dereferenceable(4))
diff --git a/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s b/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s
new file mode 100644
index 0000000..65e1203
--- /dev/null
+++ b/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s
@@ -0,0 +1,8 @@
+REQUIRES: aarch64-registered-target
+
+RUN: llvm-exegesis -mtriple=aarch64 -mcpu=neoverse-v2 -mode=latency --dump-object-to-disk=%d --opcode-name=FMOVWSr --benchmark-phase=assemble-measured-code 2>&1
+RUN: llvm-objdump -d %d > %t.s
+RUN: FileCheck %s < %t.s
+
+CHECK-NOT: ld{{[1-4]}}
+CHECK-NOT: st{{[1-4]}}
diff --git a/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2.s b/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2.s
index d777d31..8e0d47e 100644
--- a/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2.s
+++ b/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2.s
@@ -153,12 +153,12 @@ vpshrdw $1, (%rax), %zmm17, %zmm19 {k1}{z}
# CHECK-NEXT: 2 8 1.00 * vpcompressw %zmm16, (%rax) {%k1}
# CHECK-NEXT: 1 1 1.00 vpcompressw %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 1 1 1.00 U vpexpandb %zmm16, %zmm19
-# CHECK-NEXT: 2 8 1.00 U vpexpandb (%rax), %zmm19
+# CHECK-NEXT: 2 8 1.00 * U vpexpandb (%rax), %zmm19
# CHECK-NEXT: 1 1 1.00 vpexpandb %zmm16, %zmm19 {%k1}
# CHECK-NEXT: 2 8 1.00 * vpexpandb (%rax), %zmm19 {%k1}
# CHECK-NEXT: 1 1 1.00 vpexpandb %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 1 1 1.00 U vpexpandw %zmm16, %zmm19
-# CHECK-NEXT: 2 8 1.00 U vpexpandw (%rax), %zmm19
+# CHECK-NEXT: 2 8 1.00 * U vpexpandw (%rax), %zmm19
# CHECK-NEXT: 1 1 1.00 vpexpandw %zmm16, %zmm19 {%k1}
# CHECK-NEXT: 2 8 1.00 * vpexpandw (%rax), %zmm19 {%k1}
# CHECK-NEXT: 1 1 1.00 vpexpandw %zmm16, %zmm19 {%k1} {z}
diff --git a/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2vl.s b/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2vl.s
index 99b88fe..f6be964 100644
--- a/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2vl.s
+++ b/llvm/test/tools/llvm-mca/X86/Generic/resources-avx512vbmi2vl.s
@@ -295,22 +295,22 @@ vpshrdw $1, (%rax), %ymm17, %ymm19 {k1}{z}
# CHECK-NEXT: 2 8 1.00 * vpcompressw %ymm16, (%rax) {%k1}
# CHECK-NEXT: 1 1 1.00 vpcompressw %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 1 1 1.00 U vpexpandb %xmm16, %xmm19
-# CHECK-NEXT: 2 8 1.00 U vpexpandb (%rax), %xmm19
+# CHECK-NEXT: 2 8 1.00 * U vpexpandb (%rax), %xmm19
# CHECK-NEXT: 1 1 1.00 vpexpandb %xmm16, %xmm19 {%k1}
# CHECK-NEXT: 2 8 1.00 * vpexpandb (%rax), %xmm19 {%k1}
# CHECK-NEXT: 1 1 1.00 vpexpandb %xmm16, %xmm19 {%k1} {z}
# CHECK-NEXT: 1 1 1.00 U vpexpandb %ymm16, %ymm19
-# CHECK-NEXT: 2 8 1.00 U vpexpandb (%rax), %ymm19
+# CHECK-NEXT: 2 8 1.00 * U vpexpandb (%rax), %ymm19
# CHECK-NEXT: 1 1 1.00 vpexpandb %ymm16, %ymm19 {%k1}
# CHECK-NEXT: 2 8 1.00 * vpexpandb (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 1 1.00 vpexpandb %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 1 1 1.00 U vpexpandw %xmm16, %xmm19
-# CHECK-NEXT: 2 8 1.00 U vpexpandw (%rax), %xmm19
+# CHECK-NEXT: 2 8 1.00 * U vpexpandw (%rax), %xmm19
# CHECK-NEXT: 1 1 1.00 vpexpandw %xmm16, %xmm19 {%k1}
# CHECK-NEXT: 2 8 1.00 * vpexpandw (%rax), %xmm19 {%k1}
# CHECK-NEXT: 1 1 1.00 vpexpandw %xmm16, %xmm19 {%k1} {z}
# CHECK-NEXT: 1 1 1.00 U vpexpandw %ymm16, %ymm19
-# CHECK-NEXT: 2 8 1.00 U vpexpandw (%rax), %ymm19
+# CHECK-NEXT: 2 8 1.00 * U vpexpandw (%rax), %ymm19
# CHECK-NEXT: 1 1 1.00 vpexpandw %ymm16, %ymm19 {%k1}
# CHECK-NEXT: 2 8 1.00 * vpexpandw (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 1 1.00 vpexpandw %ymm16, %ymm19 {%k1} {z}
diff --git a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2.s b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2.s
index 08f07dc..5c987ee 100644
--- a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2.s
+++ b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2.s
@@ -153,12 +153,12 @@ vpshrdw $1, (%rax), %zmm17, %zmm19 {k1}{z}
# CHECK-NEXT: 2 10 1.00 * vpcompressw %zmm16, (%rax) {%k1}
# CHECK-NEXT: 1 3 1.00 vpcompressw %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 1 3 1.00 U vpexpandb %zmm16, %zmm19
-# CHECK-NEXT: 2 10 1.00 U vpexpandb (%rax), %zmm19
+# CHECK-NEXT: 2 10 1.00 * U vpexpandb (%rax), %zmm19
# CHECK-NEXT: 1 3 1.00 vpexpandb %zmm16, %zmm19 {%k1}
# CHECK-NEXT: 2 10 1.00 * vpexpandb (%rax), %zmm19 {%k1}
# CHECK-NEXT: 1 3 1.00 vpexpandb %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 1 3 1.00 U vpexpandw %zmm16, %zmm19
-# CHECK-NEXT: 2 10 1.00 U vpexpandw (%rax), %zmm19
+# CHECK-NEXT: 2 10 1.00 * U vpexpandw (%rax), %zmm19
# CHECK-NEXT: 1 3 1.00 vpexpandw %zmm16, %zmm19 {%k1}
# CHECK-NEXT: 2 10 1.00 * vpexpandw (%rax), %zmm19 {%k1}
# CHECK-NEXT: 1 3 1.00 vpexpandw %zmm16, %zmm19 {%k1} {z}
diff --git a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2vl.s b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2vl.s
index 0194303..023026b 100644
--- a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2vl.s
+++ b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512vbmi2vl.s
@@ -295,22 +295,22 @@ vpshrdw $1, (%rax), %ymm17, %ymm19 {k1}{z}
# CHECK-NEXT: 2 10 1.00 * vpcompressw %ymm16, (%rax) {%k1}
# CHECK-NEXT: 1 3 1.00 vpcompressw %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 1 3 1.00 U vpexpandb %xmm16, %xmm19
-# CHECK-NEXT: 2 10 1.00 U vpexpandb (%rax), %xmm19
+# CHECK-NEXT: 2 10 1.00 * U vpexpandb (%rax), %xmm19
# CHECK-NEXT: 1 3 1.00 vpexpandb %xmm16, %xmm19 {%k1}
# CHECK-NEXT: 2 10 1.00 * vpexpandb (%rax), %xmm19 {%k1}
# CHECK-NEXT: 1 3 1.00 vpexpandb %xmm16, %xmm19 {%k1} {z}
# CHECK-NEXT: 1 3 1.00 U vpexpandb %ymm16, %ymm19
-# CHECK-NEXT: 2 10 1.00 U vpexpandb (%rax), %ymm19
+# CHECK-NEXT: 2 10 1.00 * U vpexpandb (%rax), %ymm19
# CHECK-NEXT: 1 3 1.00 vpexpandb %ymm16, %ymm19 {%k1}
# CHECK-NEXT: 2 10 1.00 * vpexpandb (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 3 1.00 vpexpandb %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 1 3 1.00 U vpexpandw %xmm16, %xmm19
-# CHECK-NEXT: 2 10 1.00 U vpexpandw (%rax), %xmm19
+# CHECK-NEXT: 2 10 1.00 * U vpexpandw (%rax), %xmm19
# CHECK-NEXT: 1 3 1.00 vpexpandw %xmm16, %xmm19 {%k1}
# CHECK-NEXT: 2 10 1.00 * vpexpandw (%rax), %xmm19 {%k1}
# CHECK-NEXT: 1 3 1.00 vpexpandw %xmm16, %xmm19 {%k1} {z}
# CHECK-NEXT: 1 3 1.00 U vpexpandw %ymm16, %ymm19
-# CHECK-NEXT: 2 10 1.00 U vpexpandw (%rax), %ymm19
+# CHECK-NEXT: 2 10 1.00 * U vpexpandw (%rax), %ymm19
# CHECK-NEXT: 1 3 1.00 vpexpandw %ymm16, %ymm19 {%k1}
# CHECK-NEXT: 2 10 1.00 * vpexpandw (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 3 1.00 vpexpandw %ymm16, %ymm19 {%k1} {z}
diff --git a/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2.s b/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2.s
index ed8a417..db1f9af 100644
--- a/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2.s
+++ b/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2.s
@@ -153,12 +153,12 @@ vpshrdw $1, (%rax), %zmm17, %zmm19 {k1}{z}
# CHECK-NEXT: 6 14 2.00 * vpcompressw %zmm16, (%rax) {%k1}
# CHECK-NEXT: 2 6 2.00 vpcompressw %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 2 3 2.00 U vpexpandb %zmm16, %zmm19
-# CHECK-NEXT: 3 11 2.00 U vpexpandb (%rax), %zmm19
+# CHECK-NEXT: 3 11 2.00 * U vpexpandb (%rax), %zmm19
# CHECK-NEXT: 2 8 2.00 vpexpandb %zmm16, %zmm19 {%k1}
# CHECK-NEXT: 3 13 2.00 * vpexpandb (%rax), %zmm19 {%k1}
# CHECK-NEXT: 2 8 2.00 vpexpandb %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 2 3 2.00 U vpexpandw %zmm16, %zmm19
-# CHECK-NEXT: 3 11 2.00 U vpexpandw (%rax), %zmm19
+# CHECK-NEXT: 3 11 2.00 * U vpexpandw (%rax), %zmm19
# CHECK-NEXT: 2 8 2.00 vpexpandw %zmm16, %zmm19 {%k1}
# CHECK-NEXT: 3 13 2.00 * vpexpandw (%rax), %zmm19 {%k1}
# CHECK-NEXT: 2 8 2.00 vpexpandw %zmm16, %zmm19 {%k1} {z}
diff --git a/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2vl.s b/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2vl.s
index 3db09bc..9277a91 100644
--- a/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2vl.s
+++ b/llvm/test/tools/llvm-mca/X86/SapphireRapids/resources-avx512vbmi2vl.s
@@ -295,22 +295,22 @@ vpshrdw $1, (%rax), %ymm17, %ymm19 {k1}{z}
# CHECK-NEXT: 6 14 2.00 * vpcompressw %ymm16, (%rax) {%k1}
# CHECK-NEXT: 2 6 2.00 vpcompressw %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 2 3 2.00 U vpexpandb %xmm16, %xmm19
-# CHECK-NEXT: 3 10 2.00 U vpexpandb (%rax), %xmm19
+# CHECK-NEXT: 3 10 2.00 * U vpexpandb (%rax), %xmm19
# CHECK-NEXT: 2 8 2.00 vpexpandb %xmm16, %xmm19 {%k1}
# CHECK-NEXT: 3 13 2.00 * vpexpandb (%rax), %xmm19 {%k1}
# CHECK-NEXT: 2 8 2.00 vpexpandb %xmm16, %xmm19 {%k1} {z}
# CHECK-NEXT: 2 3 2.00 U vpexpandb %ymm16, %ymm19
-# CHECK-NEXT: 3 11 2.00 U vpexpandb (%rax), %ymm19
+# CHECK-NEXT: 3 11 2.00 * U vpexpandb (%rax), %ymm19
# CHECK-NEXT: 2 8 2.00 vpexpandb %ymm16, %ymm19 {%k1}
# CHECK-NEXT: 3 13 2.00 * vpexpandb (%rax), %ymm19 {%k1}
# CHECK-NEXT: 2 8 2.00 vpexpandb %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 2 3 2.00 U vpexpandw %xmm16, %xmm19
-# CHECK-NEXT: 3 10 2.00 U vpexpandw (%rax), %xmm19
+# CHECK-NEXT: 3 10 2.00 * U vpexpandw (%rax), %xmm19
# CHECK-NEXT: 2 8 2.00 vpexpandw %xmm16, %xmm19 {%k1}
# CHECK-NEXT: 3 13 2.00 * vpexpandw (%rax), %xmm19 {%k1}
# CHECK-NEXT: 2 8 2.00 vpexpandw %xmm16, %xmm19 {%k1} {z}
# CHECK-NEXT: 2 3 2.00 U vpexpandw %ymm16, %ymm19
-# CHECK-NEXT: 3 11 2.00 U vpexpandw (%rax), %ymm19
+# CHECK-NEXT: 3 11 2.00 * U vpexpandw (%rax), %ymm19
# CHECK-NEXT: 2 8 2.00 vpexpandw %ymm16, %ymm19 {%k1}
# CHECK-NEXT: 3 13 2.00 * vpexpandw (%rax), %ymm19 {%k1}
# CHECK-NEXT: 2 8 2.00 vpexpandw %ymm16, %ymm19 {%k1} {z}
diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2.s b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2.s
index 594518d..88e140d 100644
--- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2.s
+++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2.s
@@ -153,12 +153,12 @@ vpshrdw $1, (%rax), %zmm17, %zmm19 {k1}{z}
# CHECK-NEXT: 2 8 0.50 * vpcompressw %zmm16, (%rax) {%k1}
# CHECK-NEXT: 1 5 1.00 vpcompressw %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 1 5 1.00 U vpexpandb %zmm16, %zmm19
-# CHECK-NEXT: 2 8 0.50 U vpexpandb (%rax), %zmm19
+# CHECK-NEXT: 2 8 0.50 * U vpexpandb (%rax), %zmm19
# CHECK-NEXT: 1 5 1.00 vpexpandb %zmm16, %zmm19 {%k1}
# CHECK-NEXT: 2 8 0.50 * vpexpandb (%rax), %zmm19 {%k1}
# CHECK-NEXT: 1 5 1.00 vpexpandb %zmm16, %zmm19 {%k1} {z}
# CHECK-NEXT: 1 5 1.00 U vpexpandw %zmm16, %zmm19
-# CHECK-NEXT: 2 8 0.50 U vpexpandw (%rax), %zmm19
+# CHECK-NEXT: 2 8 0.50 * U vpexpandw (%rax), %zmm19
# CHECK-NEXT: 1 5 1.00 vpexpandw %zmm16, %zmm19 {%k1}
# CHECK-NEXT: 2 8 0.50 * vpexpandw (%rax), %zmm19 {%k1}
# CHECK-NEXT: 1 5 1.00 vpexpandw %zmm16, %zmm19 {%k1} {z}
diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2vl.s b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2vl.s
index 7b9c2516..325835a 100644
--- a/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2vl.s
+++ b/llvm/test/tools/llvm-mca/X86/Znver4/resources-avx512vbmi2vl.s
@@ -295,22 +295,22 @@ vpshrdw $1, (%rax), %ymm17, %ymm19 {k1}{z}
# CHECK-NEXT: 2 8 0.50 * vpcompressw %ymm16, (%rax) {%k1}
# CHECK-NEXT: 1 4 1.00 vpcompressw %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 2 1 0.50 U vpexpandb %xmm16, %xmm19
-# CHECK-NEXT: 2 8 0.50 U vpexpandb (%rax), %xmm19
+# CHECK-NEXT: 2 8 0.50 * U vpexpandb (%rax), %xmm19
# CHECK-NEXT: 2 1 0.50 vpexpandb %xmm16, %xmm19 {%k1}
# CHECK-NEXT: 2 8 0.50 * vpexpandb (%rax), %xmm19 {%k1}
# CHECK-NEXT: 2 1 0.50 vpexpandb %xmm16, %xmm19 {%k1} {z}
# CHECK-NEXT: 1 4 1.00 U vpexpandb %ymm16, %ymm19
-# CHECK-NEXT: 2 8 0.50 U vpexpandb (%rax), %ymm19
+# CHECK-NEXT: 2 8 0.50 * U vpexpandb (%rax), %ymm19
# CHECK-NEXT: 1 4 1.00 vpexpandb %ymm16, %ymm19 {%k1}
# CHECK-NEXT: 2 8 0.50 * vpexpandb (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 4 1.00 vpexpandb %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 2 1 0.50 U vpexpandw %xmm16, %xmm19
-# CHECK-NEXT: 2 8 0.50 U vpexpandw (%rax), %xmm19
+# CHECK-NEXT: 2 8 0.50 * U vpexpandw (%rax), %xmm19
# CHECK-NEXT: 2 1 0.50 vpexpandw %xmm16, %xmm19 {%k1}
# CHECK-NEXT: 2 8 0.50 * vpexpandw (%rax), %xmm19 {%k1}
# CHECK-NEXT: 2 1 0.50 vpexpandw %xmm16, %xmm19 {%k1} {z}
# CHECK-NEXT: 1 4 1.00 U vpexpandw %ymm16, %ymm19
-# CHECK-NEXT: 2 8 0.50 U vpexpandw (%rax), %ymm19
+# CHECK-NEXT: 2 8 0.50 * U vpexpandw (%rax), %ymm19
# CHECK-NEXT: 1 4 1.00 vpexpandw %ymm16, %ymm19 {%k1}
# CHECK-NEXT: 2 8 0.50 * vpexpandw (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 4 1.00 vpexpandw %ymm16, %ymm19 {%k1} {z}
diff --git a/llvm/tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp b/llvm/tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp
index bdfc93e..707e6ee 100644
--- a/llvm/tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp
+++ b/llvm/tools/llvm-exegesis/lib/SerialSnippetGenerator.cpp
@@ -57,6 +57,12 @@ computeAliasingInstructions(const LLVMState &State, const Instruction *Instr,
continue;
if (OtherInstr.hasMemoryOperands())
continue;
+ // Filtering out loads/stores might belong in hasMemoryOperands(), but that
+ // complicates things, as there are instructions that may load/store yet
+ // have no memory operands (e.g. X86's CLUI instruction). So it is easier to
+ // filter them out here.
+ if (OtherInstr.Description.mayLoad() || OtherInstr.Description.mayStore())
+ continue;
if (!ET.allowAsBackToBack(OtherInstr))
continue;
if (Instr->hasAliasingRegistersThrough(OtherInstr, ForbiddenRegisters))
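As a minimal sketch of what the new check above amounts to (not part of the patch; the helper name is illustrative only, assuming the standard MCInstrDesc::mayLoad()/mayStore() flags), the filter can be read as a standalone predicate over the instruction description:

    // Sketch only: the same predicate expressed as a free helper.
    #include "llvm/MC/MCInstrDesc.h"

    static bool mayTouchMemory(const llvm::MCInstrDesc &Desc) {
      // mayLoad()/mayStore() are per-instruction flags, so they also cover
      // instructions that access memory without exposing explicit memory
      // operands (e.g. X86's CLUI), which is why the check cannot simply be
      // folded into hasMemoryOperands().
      return Desc.mayLoad() || Desc.mayStore();
    }

In the loop above this corresponds to testing OtherInstr.Description before considering the instruction as a back-to-back candidate.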
diff --git a/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp b/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp
index d8457a3..d1c0f64 100644
--- a/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp
+++ b/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp
@@ -230,7 +230,8 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef)
CallBase *Call = findCall(*Func, "call");
Trie.buildAndAttachMIBMetadata(Call);
- EXPECT_FALSE(Call->hasFnAttr("memprof"));
+ EXPECT_TRUE(Call->hasFnAttr("memprof"));
+ EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous");
EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof));
MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof);
ASSERT_EQ(MemProfMD->getNumOperands(), 2u);
@@ -279,7 +280,8 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef)
CallBase *Call = findCall(*Func, "call");
Trie.buildAndAttachMIBMetadata(Call);
- EXPECT_FALSE(Call->hasFnAttr("memprof"));
+ EXPECT_TRUE(Call->hasFnAttr("memprof"));
+ EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous");
EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof));
MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof);
ASSERT_EQ(MemProfMD->getNumOperands(), 2u);
@@ -333,7 +335,8 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef)
CallBase *Call = findCall(*Func, "call");
Trie.buildAndAttachMIBMetadata(Call);
- EXPECT_FALSE(Call->hasFnAttr("memprof"));
+ EXPECT_TRUE(Call->hasFnAttr("memprof"));
+ EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous");
EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof));
MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof);
ASSERT_EQ(MemProfMD->getNumOperands(), 2u);
@@ -392,7 +395,8 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef)
CallBase *Call = findCall(*Func, "call");
Trie.buildAndAttachMIBMetadata(Call);
- EXPECT_FALSE(Call->hasFnAttr("memprof"));
+ EXPECT_TRUE(Call->hasFnAttr("memprof"));
+ EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous");
EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof));
MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof);
ASSERT_EQ(MemProfMD->getNumOperands(), 2u);
@@ -463,7 +467,8 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef)
ASSERT_NE(Call, nullptr);
Trie.buildAndAttachMIBMetadata(Call);
- EXPECT_FALSE(Call->hasFnAttr("memprof"));
+ EXPECT_TRUE(Call->hasFnAttr("memprof"));
+ EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous");
EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof));
MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof);
EXPECT_THAT(MemProfMD, MemprofMetadataEquals(ExpectedVals));
@@ -536,7 +541,8 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef)
// Restore original option value.
MemProfKeepAllNotColdContexts = OrigMemProfKeepAllNotColdContexts;
- EXPECT_FALSE(Call->hasFnAttr("memprof"));
+ EXPECT_TRUE(Call->hasFnAttr("memprof"));
+ EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous");
EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof));
MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof);
EXPECT_THAT(MemProfMD, MemprofMetadataEquals(ExpectedVals));
@@ -664,7 +670,8 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef)
// The hot allocations will be converted to NotCold and pruned as they
// are unnecessary to determine how to clone the cold allocation.
- EXPECT_FALSE(Call->hasFnAttr("memprof"));
+ EXPECT_TRUE(Call->hasFnAttr("memprof"));
+ EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous");
EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof));
MemProfMD = Call->getMetadata(LLVMContext::MD_memprof);
ASSERT_EQ(MemProfMD->getNumOperands(), 2u);
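The updated expectations reflect that buildAndAttachMIBMetadata now also tags calls that keep multiple MIBs with a "memprof"="ambiguous" string attribute alongside the !memprof metadata. A minimal sketch of how a client could query that combination (the helper name is hypothetical; only the standard CallBase/Attribute APIs exercised in the test are assumed):

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/InstrTypes.h"
    #include "llvm/IR/LLVMContext.h"

    // Sketch only: recognize calls left "ambiguous" by MemoryProfileInfo.
    static bool isAmbiguousMemProfCall(const llvm::CallBase &CB) {
      // The attribute is only meaningful when MIB metadata is attached.
      if (!CB.hasMetadata(llvm::LLVMContext::MD_memprof))
        return false;
      llvm::Attribute A = CB.getFnAttr("memprof");
      return A.isValid() && A.getValueAsString() == "ambiguous";
    }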